// SPDX-License-Identifier: GPL-2.0+
// Copyright (c) 2016-2017 Hisilicon Limited.

#include <linux/acpi.h>
#include <linux/device.h>
#include <linux/etherdevice.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
#include <linux/if_vlan.h>
#include <net/rtnetlink.h>
#include "hclge_cmd.h"
#include "hclge_dcb.h"
#include "hclge_main.h"
#include "hclge_mbx.h"
#include "hclge_mdio.h"
#include "hclge_tm.h"
#include "hclge_err.h"
#include "hnae3.h"

#define HCLGE_NAME			"hclge"
#define HCLGE_STATS_READ(p, offset) (*((u64 *)((u8 *)(p) + (offset))))
#define HCLGE_MAC_STATS_FIELD_OFF(f) (offsetof(struct hclge_mac_stats, f))
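/* HCLGE_STATS_READ() treats @p as a byte buffer and reads the u64 counter
 * at byte offset @offset; together with HCLGE_MAC_STATS_FIELD_OFF() it lets
 * the stats helpers walk struct hclge_mac_stats by the field offsets stored
 * in g_mac_stats_string below.
 */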
static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mps);
static int hclge_init_vlan_config(struct hclge_dev *hdev);
static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev);
static int hclge_set_umv_space(struct hclge_dev *hdev, u16 space_size,
			       u16 *allocated_size, bool is_alloc);

static struct hnae3_ae_algo ae_algo;
static const struct pci_device_id ae_algo_pci_tbl[] = {
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_GE), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA_MACSEC), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA_MACSEC), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_MACSEC), 0},
	/* required last entry */
	{0, }
};

MODULE_DEVICE_TABLE(pci, ae_algo_pci_tbl);
static const u32 cmdq_reg_addr_list[] = {HCLGE_CMDQ_TX_ADDR_L_REG,
					 HCLGE_CMDQ_TX_ADDR_H_REG,
					 HCLGE_CMDQ_TX_DEPTH_REG,
					 HCLGE_CMDQ_TX_TAIL_REG,
					 HCLGE_CMDQ_TX_HEAD_REG,
					 HCLGE_CMDQ_RX_ADDR_L_REG,
					 HCLGE_CMDQ_RX_ADDR_H_REG,
					 HCLGE_CMDQ_RX_DEPTH_REG,
					 HCLGE_CMDQ_RX_TAIL_REG,
					 HCLGE_CMDQ_RX_HEAD_REG,
					 HCLGE_VECTOR0_CMDQ_SRC_REG,
					 HCLGE_CMDQ_INTR_STS_REG,
					 HCLGE_CMDQ_INTR_EN_REG,
					 HCLGE_CMDQ_INTR_GEN_REG};
static const u32 common_reg_addr_list[] = {HCLGE_MISC_VECTOR_REG_BASE,
					   HCLGE_VECTOR0_OTER_EN_REG,
					   HCLGE_MISC_RESET_STS_REG,
					   HCLGE_MISC_VECTOR_INT_STS,
					   HCLGE_GLOBAL_RESET_REG};
static const u32 ring_reg_addr_list[] = {HCLGE_RING_RX_ADDR_L_REG,
					 HCLGE_RING_RX_ADDR_H_REG,
					 HCLGE_RING_RX_BD_NUM_REG,
					 HCLGE_RING_RX_BD_LENGTH_REG,
					 HCLGE_RING_RX_MERGE_EN_REG,
					 HCLGE_RING_RX_TAIL_REG,
					 HCLGE_RING_RX_HEAD_REG,
					 HCLGE_RING_RX_FBD_NUM_REG,
					 HCLGE_RING_RX_OFFSET_REG,
					 HCLGE_RING_RX_FBD_OFFSET_REG,
					 HCLGE_RING_RX_STASH_REG,
					 HCLGE_RING_RX_BD_ERR_REG,
					 HCLGE_RING_TX_ADDR_L_REG,
					 HCLGE_RING_TX_ADDR_H_REG,
					 HCLGE_RING_TX_BD_NUM_REG,
					 HCLGE_RING_TX_PRIORITY_REG,
					 HCLGE_RING_TX_MERGE_EN_REG,
					 HCLGE_RING_TX_TAIL_REG,
					 HCLGE_RING_TX_HEAD_REG,
					 HCLGE_RING_TX_FBD_NUM_REG,
					 HCLGE_RING_TX_OFFSET_REG,
					 HCLGE_RING_TX_EBD_NUM_REG,
					 HCLGE_RING_TX_EBD_OFFSET_REG,
					 HCLGE_RING_TX_BD_ERR_REG};
static const u32 tqp_intr_reg_addr_list[] = {HCLGE_TQP_INTR_CTRL_REG,
					     HCLGE_TQP_INTR_GL0_REG,
					     HCLGE_TQP_INTR_GL1_REG,
					     HCLGE_TQP_INTR_GL2_REG,
					     HCLGE_TQP_INTR_RL_REG};
static const char hns3_nic_test_strs[][ETH_GSTRING_LEN] = {
	"App Loopback test",
	"Serdes serial Loopback test",
	"Serdes parallel Loopback test",
	"Phy Loopback test"
};
static const struct hclge_comm_stats_str g_mac_stats_string[] = {
	{"mac_tx_mac_pause_num", HCLGE_MAC_STATS_FIELD_OFF(mac_tx_mac_pause_num)},
	{"mac_rx_mac_pause_num", HCLGE_MAC_STATS_FIELD_OFF(mac_rx_mac_pause_num)},
	{"mac_tx_pfc_pri0_pkt_num", HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri0_pkt_num)},
	{"mac_tx_pfc_pri1_pkt_num", HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri1_pkt_num)},
	{"mac_tx_pfc_pri2_pkt_num", HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri2_pkt_num)},
	{"mac_tx_pfc_pri3_pkt_num", HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri3_pkt_num)},
	{"mac_tx_pfc_pri4_pkt_num", HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri4_pkt_num)},
	{"mac_tx_pfc_pri5_pkt_num", HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri5_pkt_num)},
	{"mac_tx_pfc_pri6_pkt_num", HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri6_pkt_num)},
	{"mac_tx_pfc_pri7_pkt_num", HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri7_pkt_num)},
	{"mac_rx_pfc_pri0_pkt_num", HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri0_pkt_num)},
	{"mac_rx_pfc_pri1_pkt_num", HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri1_pkt_num)},
	{"mac_rx_pfc_pri2_pkt_num", HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri2_pkt_num)},
	{"mac_rx_pfc_pri3_pkt_num", HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri3_pkt_num)},
	{"mac_rx_pfc_pri4_pkt_num", HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri4_pkt_num)},
	{"mac_rx_pfc_pri5_pkt_num", HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri5_pkt_num)},
	{"mac_rx_pfc_pri6_pkt_num", HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri6_pkt_num)},
	{"mac_rx_pfc_pri7_pkt_num", HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri7_pkt_num)},
	{"mac_tx_total_pkt_num", HCLGE_MAC_STATS_FIELD_OFF(mac_tx_total_pkt_num)},
	{"mac_tx_total_oct_num", HCLGE_MAC_STATS_FIELD_OFF(mac_tx_total_oct_num)},
	{"mac_tx_good_pkt_num", HCLGE_MAC_STATS_FIELD_OFF(mac_tx_good_pkt_num)},
	{"mac_tx_bad_pkt_num", HCLGE_MAC_STATS_FIELD_OFF(mac_tx_bad_pkt_num)},
	{"mac_tx_good_oct_num", HCLGE_MAC_STATS_FIELD_OFF(mac_tx_good_oct_num)},
	{"mac_tx_bad_oct_num", HCLGE_MAC_STATS_FIELD_OFF(mac_tx_bad_oct_num)},
	{"mac_tx_uni_pkt_num", HCLGE_MAC_STATS_FIELD_OFF(mac_tx_uni_pkt_num)},
	{"mac_tx_multi_pkt_num", HCLGE_MAC_STATS_FIELD_OFF(mac_tx_multi_pkt_num)},
	{"mac_tx_broad_pkt_num", HCLGE_MAC_STATS_FIELD_OFF(mac_tx_broad_pkt_num)},
	{"mac_tx_undersize_pkt_num", HCLGE_MAC_STATS_FIELD_OFF(mac_tx_undersize_pkt_num)},
	{"mac_tx_oversize_pkt_num", HCLGE_MAC_STATS_FIELD_OFF(mac_tx_oversize_pkt_num)},
	{"mac_tx_64_oct_pkt_num", HCLGE_MAC_STATS_FIELD_OFF(mac_tx_64_oct_pkt_num)},
	{"mac_tx_65_127_oct_pkt_num", HCLGE_MAC_STATS_FIELD_OFF(mac_tx_65_127_oct_pkt_num)},
	{"mac_tx_128_255_oct_pkt_num", HCLGE_MAC_STATS_FIELD_OFF(mac_tx_128_255_oct_pkt_num)},
	{"mac_tx_256_511_oct_pkt_num", HCLGE_MAC_STATS_FIELD_OFF(mac_tx_256_511_oct_pkt_num)},
	{"mac_tx_512_1023_oct_pkt_num", HCLGE_MAC_STATS_FIELD_OFF(mac_tx_512_1023_oct_pkt_num)},
	{"mac_tx_1024_1518_oct_pkt_num", HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1024_1518_oct_pkt_num)},
	{"mac_tx_1519_2047_oct_pkt_num", HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_2047_oct_pkt_num)},
	{"mac_tx_2048_4095_oct_pkt_num", HCLGE_MAC_STATS_FIELD_OFF(mac_tx_2048_4095_oct_pkt_num)},
	{"mac_tx_4096_8191_oct_pkt_num", HCLGE_MAC_STATS_FIELD_OFF(mac_tx_4096_8191_oct_pkt_num)},
	{"mac_tx_8192_9216_oct_pkt_num", HCLGE_MAC_STATS_FIELD_OFF(mac_tx_8192_9216_oct_pkt_num)},
	{"mac_tx_9217_12287_oct_pkt_num", HCLGE_MAC_STATS_FIELD_OFF(mac_tx_9217_12287_oct_pkt_num)},
	{"mac_tx_12288_16383_oct_pkt_num", HCLGE_MAC_STATS_FIELD_OFF(mac_tx_12288_16383_oct_pkt_num)},
	{"mac_tx_1519_max_good_pkt_num", HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_max_good_oct_pkt_num)},
	{"mac_tx_1519_max_bad_pkt_num", HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_max_bad_oct_pkt_num)},
	{"mac_rx_total_pkt_num", HCLGE_MAC_STATS_FIELD_OFF(mac_rx_total_pkt_num)},
	{"mac_rx_total_oct_num", HCLGE_MAC_STATS_FIELD_OFF(mac_rx_total_oct_num)},
	{"mac_rx_good_pkt_num", HCLGE_MAC_STATS_FIELD_OFF(mac_rx_good_pkt_num)},
	{"mac_rx_bad_pkt_num", HCLGE_MAC_STATS_FIELD_OFF(mac_rx_bad_pkt_num)},
	{"mac_rx_good_oct_num", HCLGE_MAC_STATS_FIELD_OFF(mac_rx_good_oct_num)},
	{"mac_rx_bad_oct_num", HCLGE_MAC_STATS_FIELD_OFF(mac_rx_bad_oct_num)},
	{"mac_rx_uni_pkt_num", HCLGE_MAC_STATS_FIELD_OFF(mac_rx_uni_pkt_num)},
	{"mac_rx_multi_pkt_num", HCLGE_MAC_STATS_FIELD_OFF(mac_rx_multi_pkt_num)},
	{"mac_rx_broad_pkt_num", HCLGE_MAC_STATS_FIELD_OFF(mac_rx_broad_pkt_num)},
	{"mac_rx_undersize_pkt_num", HCLGE_MAC_STATS_FIELD_OFF(mac_rx_undersize_pkt_num)},
	{"mac_rx_oversize_pkt_num", HCLGE_MAC_STATS_FIELD_OFF(mac_rx_oversize_pkt_num)},
	{"mac_rx_64_oct_pkt_num", HCLGE_MAC_STATS_FIELD_OFF(mac_rx_64_oct_pkt_num)},
	{"mac_rx_65_127_oct_pkt_num", HCLGE_MAC_STATS_FIELD_OFF(mac_rx_65_127_oct_pkt_num)},
	{"mac_rx_128_255_oct_pkt_num", HCLGE_MAC_STATS_FIELD_OFF(mac_rx_128_255_oct_pkt_num)},
	{"mac_rx_256_511_oct_pkt_num", HCLGE_MAC_STATS_FIELD_OFF(mac_rx_256_511_oct_pkt_num)},
	{"mac_rx_512_1023_oct_pkt_num", HCLGE_MAC_STATS_FIELD_OFF(mac_rx_512_1023_oct_pkt_num)},
	{"mac_rx_1024_1518_oct_pkt_num", HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1024_1518_oct_pkt_num)},
	{"mac_rx_1519_2047_oct_pkt_num", HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_2047_oct_pkt_num)},
	{"mac_rx_2048_4095_oct_pkt_num", HCLGE_MAC_STATS_FIELD_OFF(mac_rx_2048_4095_oct_pkt_num)},
	{"mac_rx_4096_8191_oct_pkt_num", HCLGE_MAC_STATS_FIELD_OFF(mac_rx_4096_8191_oct_pkt_num)},
	{"mac_rx_8192_9216_oct_pkt_num", HCLGE_MAC_STATS_FIELD_OFF(mac_rx_8192_9216_oct_pkt_num)},
	{"mac_rx_9217_12287_oct_pkt_num", HCLGE_MAC_STATS_FIELD_OFF(mac_rx_9217_12287_oct_pkt_num)},
	{"mac_rx_12288_16383_oct_pkt_num", HCLGE_MAC_STATS_FIELD_OFF(mac_rx_12288_16383_oct_pkt_num)},
	{"mac_rx_1519_max_good_pkt_num", HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_max_good_oct_pkt_num)},
	{"mac_rx_1519_max_bad_pkt_num", HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_max_bad_oct_pkt_num)},

	{"mac_tx_fragment_pkt_num", HCLGE_MAC_STATS_FIELD_OFF(mac_tx_fragment_pkt_num)},
	{"mac_tx_undermin_pkt_num", HCLGE_MAC_STATS_FIELD_OFF(mac_tx_undermin_pkt_num)},
	{"mac_tx_jabber_pkt_num", HCLGE_MAC_STATS_FIELD_OFF(mac_tx_jabber_pkt_num)},
	{"mac_tx_err_all_pkt_num", HCLGE_MAC_STATS_FIELD_OFF(mac_tx_err_all_pkt_num)},
	{"mac_tx_from_app_good_pkt_num", HCLGE_MAC_STATS_FIELD_OFF(mac_tx_from_app_good_pkt_num)},
	{"mac_tx_from_app_bad_pkt_num", HCLGE_MAC_STATS_FIELD_OFF(mac_tx_from_app_bad_pkt_num)},
	{"mac_rx_fragment_pkt_num", HCLGE_MAC_STATS_FIELD_OFF(mac_rx_fragment_pkt_num)},
	{"mac_rx_undermin_pkt_num", HCLGE_MAC_STATS_FIELD_OFF(mac_rx_undermin_pkt_num)},
	{"mac_rx_jabber_pkt_num", HCLGE_MAC_STATS_FIELD_OFF(mac_rx_jabber_pkt_num)},
	{"mac_rx_fcs_err_pkt_num", HCLGE_MAC_STATS_FIELD_OFF(mac_rx_fcs_err_pkt_num)},
	{"mac_rx_send_app_good_pkt_num", HCLGE_MAC_STATS_FIELD_OFF(mac_rx_send_app_good_pkt_num)},
	{"mac_rx_send_app_bad_pkt_num", HCLGE_MAC_STATS_FIELD_OFF(mac_rx_send_app_bad_pkt_num)}
};
static const struct hclge_mac_mgr_tbl_entry_cmd hclge_mgr_table[] = {
	{
		.flags = HCLGE_MAC_MGR_MASK_VLAN_B,
		.ethter_type = cpu_to_le16(HCLGE_MAC_ETHERTYPE_LLDP),
		.mac_addr_hi32 = cpu_to_le32(htonl(0x0180C200)),
		.mac_addr_lo16 = cpu_to_le16(htons(0x000E)),
		.i_port_bitmap = 0x1,
	},
};
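/* Read all MAC statistics from firmware in one batch of command
 * descriptors. Note that in the first descriptor the leading two data
 * words are consumed by the command header, which is why only
 * HCLGE_RTN_DATA_NUM - 2 stat words are taken from it.
 */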
static int hclge_mac_update_stats(struct hclge_dev *hdev)
{
#define HCLGE_MAC_CMD_NUM 21
#define HCLGE_RTN_DATA_NUM 4

	u64 *data = (u64 *)(&hdev->hw_stats.mac_stats);
	struct hclge_desc desc[HCLGE_MAC_CMD_NUM];
	__le64 *desc_data;
	int i, k, n;
	int ret;

	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_STATS_MAC, true);
	ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_MAC_CMD_NUM);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Get MAC pkt stats fail, status = %d.\n", ret);
		return ret;
	}

	for (i = 0; i < HCLGE_MAC_CMD_NUM; i++) {
		if (unlikely(i == 0)) {
			desc_data = (__le64 *)(&desc[i].data[0]);
			n = HCLGE_RTN_DATA_NUM - 2;
		} else {
			desc_data = (__le64 *)(&desc[i]);
			n = HCLGE_RTN_DATA_NUM;
		}
		for (k = 0; k < n; k++) {
			*data++ += le64_to_cpu(*desc_data);
			desc_data++;
		}
	}

	return 0;
}
static int hclge_tqps_update_stats(struct hnae3_handle *handle)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	struct hnae3_queue *queue;
	struct hclge_desc desc[1];
	struct hclge_tqp *tqp;
	int ret, i;

	for (i = 0; i < kinfo->num_tqps; i++) {
		queue = handle->kinfo.tqp[i];
		tqp = container_of(queue, struct hclge_tqp, q);
		/* command : HCLGE_OPC_QUERY_IGU_STAT */
		hclge_cmd_setup_basic_desc(&desc[0],
					   HCLGE_OPC_QUERY_RX_STATUS,
					   true);

		desc[0].data[0] = cpu_to_le32((tqp->index & 0x1ff));
		ret = hclge_cmd_send(&hdev->hw, desc, 1);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"Query tqp stat fail, status = %d, queue = %d\n",
				ret, i);
			return ret;
		}
		tqp->tqp_stats.rcb_rx_ring_pktnum_rcd +=
			le32_to_cpu(desc[0].data[1]);
	}

	for (i = 0; i < kinfo->num_tqps; i++) {
		queue = handle->kinfo.tqp[i];
		tqp = container_of(queue, struct hclge_tqp, q);
		/* command : HCLGE_OPC_QUERY_IGU_STAT */
		hclge_cmd_setup_basic_desc(&desc[0],
					   HCLGE_OPC_QUERY_TX_STATUS,
					   true);

		desc[0].data[0] = cpu_to_le32((tqp->index & 0x1ff));
		ret = hclge_cmd_send(&hdev->hw, desc, 1);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"Query tqp stat fail, status = %d, queue = %d\n",
				ret, i);
			return ret;
		}
		tqp->tqp_stats.rcb_tx_ring_pktnum_rcd +=
			le32_to_cpu(desc[0].data[1]);
	}

	return 0;
}
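/* Copy the per-queue counters into the ethtool stats buffer: all TX ring
 * counters first, then all RX ring counters, matching the string order
 * produced by hclge_tqps_get_strings(). Returns the advanced buffer
 * position.
 */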
static u64 *hclge_tqps_get_stats(struct hnae3_handle *handle, u64 *data)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
	struct hclge_tqp *tqp;
	u64 *buff = data;
	int i;

	for (i = 0; i < kinfo->num_tqps; i++) {
		tqp = container_of(kinfo->tqp[i], struct hclge_tqp, q);
		*buff++ = tqp->tqp_stats.rcb_tx_ring_pktnum_rcd;
	}

	for (i = 0; i < kinfo->num_tqps; i++) {
		tqp = container_of(kinfo->tqp[i], struct hclge_tqp, q);
		*buff++ = tqp->tqp_stats.rcb_rx_ring_pktnum_rcd;
	}

	return buff;
}
static int hclge_tqps_get_sset_count(struct hnae3_handle *handle, int stringset)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;

	/* each tqp contributes a TX and an RX counter */
	return kinfo->num_tqps * 2;
}
static u8 *hclge_tqps_get_strings(struct hnae3_handle *handle, u8 *data)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
	u8 *buff = data;
	int i;

	for (i = 0; i < kinfo->num_tqps; i++) {
		struct hclge_tqp *tqp = container_of(handle->kinfo.tqp[i],
						     struct hclge_tqp, q);
		snprintf(buff, ETH_GSTRING_LEN, "txq%d_pktnum_rcd",
			 tqp->index);
		buff = buff + ETH_GSTRING_LEN;
	}

	for (i = 0; i < kinfo->num_tqps; i++) {
		struct hclge_tqp *tqp = container_of(kinfo->tqp[i],
						     struct hclge_tqp, q);
		snprintf(buff, ETH_GSTRING_LEN, "rxq%d_pktnum_rcd",
			 tqp->index);
		buff = buff + ETH_GSTRING_LEN;
	}

	return buff;
}
static u64 *hclge_comm_get_stats(void *comm_stats,
				 const struct hclge_comm_stats_str strs[],
				 int size, u64 *data)
{
	u64 *buf = data;
	u32 i;

	for (i = 0; i < size; i++)
		buf[i] = HCLGE_STATS_READ(comm_stats, strs[i].offset);

	return buf + size;
}
static u8 *hclge_comm_get_strings(u32 stringset,
				  const struct hclge_comm_stats_str strs[],
				  int size, u8 *data)
{
	char *buff = (char *)data;
	u32 i;

	if (stringset != ETH_SS_STATS)
		return buff;

	for (i = 0; i < size; i++) {
		snprintf(buff, ETH_GSTRING_LEN, "%s", strs[i].desc);
		buff = buff + ETH_GSTRING_LEN;
	}

	return (u8 *)buff;
}
static void hclge_update_netstat(struct hclge_hw_stats *hw_stats,
				 struct net_device_stats *net_stats)
{
	net_stats->tx_dropped = 0;
	net_stats->rx_errors = hw_stats->mac_stats.mac_rx_oversize_pkt_num;
	net_stats->rx_errors += hw_stats->mac_stats.mac_rx_undersize_pkt_num;
	net_stats->rx_errors += hw_stats->mac_stats.mac_rx_fcs_err_pkt_num;

	net_stats->multicast = hw_stats->mac_stats.mac_tx_multi_pkt_num;
	net_stats->multicast += hw_stats->mac_stats.mac_rx_multi_pkt_num;

	net_stats->rx_crc_errors = hw_stats->mac_stats.mac_rx_fcs_err_pkt_num;
	net_stats->rx_length_errors =
		hw_stats->mac_stats.mac_rx_undersize_pkt_num;
	net_stats->rx_length_errors +=
		hw_stats->mac_stats.mac_rx_oversize_pkt_num;
	net_stats->rx_over_errors =
		hw_stats->mac_stats.mac_rx_oversize_pkt_num;
}
static void hclge_update_stats_for_all(struct hclge_dev *hdev)
{
	struct hnae3_handle *handle;
	int status;

	handle = &hdev->vport[0].nic;
	if (handle->client) {
		status = hclge_tqps_update_stats(handle);
		if (status)
			dev_err(&hdev->pdev->dev,
				"Update TQPS stats fail, status = %d.\n",
				status);
	}

	status = hclge_mac_update_stats(hdev);
	if (status)
		dev_err(&hdev->pdev->dev,
			"Update MAC stats fail, status = %d.\n", status);

	hclge_update_netstat(&hdev->hw_stats, &handle->kinfo.netdev->stats);
}
static void hclge_update_stats(struct hnae3_handle *handle,
			       struct net_device_stats *net_stats)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	struct hclge_hw_stats *hw_stats = &hdev->hw_stats;
	int status;

	if (test_and_set_bit(HCLGE_STATE_STATISTICS_UPDATING, &hdev->state))
		return;

	status = hclge_mac_update_stats(hdev);
	if (status)
		dev_err(&hdev->pdev->dev,
			"Update MAC stats fail, status = %d.\n",
			status);

	status = hclge_tqps_update_stats(handle);
	if (status)
		dev_err(&hdev->pdev->dev,
			"Update TQPS stats fail, status = %d.\n",
			status);

	hclge_update_netstat(hw_stats, net_stats);

	clear_bit(HCLGE_STATE_STATISTICS_UPDATING, &hdev->state);
}
static int hclge_get_sset_count(struct hnae3_handle *handle, int stringset)
{
#define HCLGE_LOOPBACK_TEST_FLAGS (HNAE3_SUPPORT_APP_LOOPBACK |\
		HNAE3_SUPPORT_PHY_LOOPBACK |\
		HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK |\
		HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK)

	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	int count = 0;

	/* Loopback test support rules:
	 * mac: only GE mode is supported
	 * serdes: all mac modes are supported, including GE/XGE/LGE/CGE
	 * phy: only supported when a phy device exists on the board
	 */
	if (stringset == ETH_SS_TEST) {
		/* clear loopback bit flags at first */
		handle->flags = (handle->flags & (~HCLGE_LOOPBACK_TEST_FLAGS));
		if (hdev->pdev->revision >= 0x21 ||
		    hdev->hw.mac.speed == HCLGE_MAC_SPEED_10M ||
		    hdev->hw.mac.speed == HCLGE_MAC_SPEED_100M ||
		    hdev->hw.mac.speed == HCLGE_MAC_SPEED_1G) {
			count += 1;
			handle->flags |= HNAE3_SUPPORT_APP_LOOPBACK;
		}

		count += 2;
		handle->flags |= HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK;
		handle->flags |= HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK;
	} else if (stringset == ETH_SS_STATS) {
		count = ARRAY_SIZE(g_mac_stats_string) +
			hclge_tqps_get_sset_count(handle, stringset);
	}

	return count;
}
static void hclge_get_strings(struct hnae3_handle *handle,
			      u32 stringset,
			      u8 *data)
{
	u8 *p = data;
	int size;

	if (stringset == ETH_SS_STATS) {
		size = ARRAY_SIZE(g_mac_stats_string);
		p = hclge_comm_get_strings(stringset,
					   g_mac_stats_string,
					   size, p);
		p = hclge_tqps_get_strings(handle, p);
	} else if (stringset == ETH_SS_TEST) {
		if (handle->flags & HNAE3_SUPPORT_APP_LOOPBACK) {
			memcpy(p,
			       hns3_nic_test_strs[HNAE3_LOOP_APP],
			       ETH_GSTRING_LEN);
			p += ETH_GSTRING_LEN;
		}
		if (handle->flags & HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK) {
			memcpy(p,
			       hns3_nic_test_strs[HNAE3_LOOP_SERIAL_SERDES],
			       ETH_GSTRING_LEN);
			p += ETH_GSTRING_LEN;
		}
		if (handle->flags & HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK) {
			memcpy(p,
			       hns3_nic_test_strs[HNAE3_LOOP_PARALLEL_SERDES],
			       ETH_GSTRING_LEN);
			p += ETH_GSTRING_LEN;
		}
		if (handle->flags & HNAE3_SUPPORT_PHY_LOOPBACK) {
			memcpy(p,
			       hns3_nic_test_strs[HNAE3_LOOP_PHY],
			       ETH_GSTRING_LEN);
			p += ETH_GSTRING_LEN;
		}
	}
}
static void hclge_get_stats(struct hnae3_handle *handle, u64 *data)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	u64 *p;

	p = hclge_comm_get_stats(&hdev->hw_stats.mac_stats,
				 g_mac_stats_string,
				 ARRAY_SIZE(g_mac_stats_string),
				 data);
	p = hclge_tqps_get_stats(handle, p);
}
static int hclge_parse_func_status(struct hclge_dev *hdev,
				   struct hclge_func_status_cmd *status)
{
	if (!(status->pf_state & HCLGE_PF_STATE_DONE))
		return -EINVAL;

	/* Set the pf to main pf */
	if (status->pf_state & HCLGE_PF_STATE_MAIN)
		hdev->flag |= HCLGE_FLAG_MAIN;
	else
		hdev->flag &= ~HCLGE_FLAG_MAIN;

	return 0;
}
static int hclge_query_function_status(struct hclge_dev *hdev)
{
	struct hclge_func_status_cmd *req;
	struct hclge_desc desc;
	int timeout = 0;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_FUNC_STATUS, true);
	req = (struct hclge_func_status_cmd *)desc.data;

	do {
		ret = hclge_cmd_send(&hdev->hw, &desc, 1);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"query function status failed %d.\n",
				ret);
			return ret;
		}

		/* Check pf reset is done */
		if (req->pf_state)
			break;
		usleep_range(1000, 2000);
	} while (timeout++ < 5);

	ret = hclge_parse_func_status(hdev, req);

	return ret;
}
static int hclge_query_pf_resource(struct hclge_dev *hdev)
{
	struct hclge_pf_res_cmd *req;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_PF_RSRC, true);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"query pf resource failed %d.\n", ret);
		return ret;
	}

	req = (struct hclge_pf_res_cmd *)desc.data;
	hdev->num_tqps = __le16_to_cpu(req->tqp_num);
	hdev->pkt_buf_size = __le16_to_cpu(req->buf_size) << HCLGE_BUF_UNIT_S;

	if (hnae3_dev_roce_supported(hdev)) {
		hdev->roce_base_msix_offset =
		hnae3_get_field(__le16_to_cpu(req->msixcap_localid_ba_rocee),
				HCLGE_MSIX_OFT_ROCEE_M, HCLGE_MSIX_OFT_ROCEE_S);
		hdev->num_roce_msi =
		hnae3_get_field(__le16_to_cpu(req->pf_intr_vector_number),
				HCLGE_PF_VEC_NUM_M, HCLGE_PF_VEC_NUM_S);

		/* PF should have NIC vectors and Roce vectors,
		 * NIC vectors are queued before Roce vectors.
		 */
		hdev->num_msi = hdev->num_roce_msi +
				hdev->roce_base_msix_offset;
	} else {
		hdev->num_msi =
		hnae3_get_field(__le16_to_cpu(req->pf_intr_vector_number),
				HCLGE_PF_VEC_NUM_M, HCLGE_PF_VEC_NUM_S);
	}

	return 0;
}
static int hclge_parse_speed(int speed_cmd, int *speed)
{
	switch (speed_cmd) {
	case 6:
		*speed = HCLGE_MAC_SPEED_10M;
		break;
	case 7:
		*speed = HCLGE_MAC_SPEED_100M;
		break;
	case 0:
		*speed = HCLGE_MAC_SPEED_1G;
		break;
	case 1:
		*speed = HCLGE_MAC_SPEED_10G;
		break;
	case 2:
		*speed = HCLGE_MAC_SPEED_25G;
		break;
	case 3:
		*speed = HCLGE_MAC_SPEED_40G;
		break;
	case 4:
		*speed = HCLGE_MAC_SPEED_50G;
		break;
	case 5:
		*speed = HCLGE_MAC_SPEED_100G;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}
static void hclge_parse_fiber_link_mode(struct hclge_dev *hdev,
					u8 speed_ability)
{
	unsigned long *supported = hdev->hw.mac.supported;

	if (speed_ability & HCLGE_SUPPORT_1G_BIT)
		set_bit(ETHTOOL_LINK_MODE_1000baseX_Full_BIT,
			supported);
	if (speed_ability & HCLGE_SUPPORT_10G_BIT)
		set_bit(ETHTOOL_LINK_MODE_10000baseSR_Full_BIT,
			supported);
	if (speed_ability & HCLGE_SUPPORT_25G_BIT)
		set_bit(ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
			supported);
	if (speed_ability & HCLGE_SUPPORT_50G_BIT)
		set_bit(ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT,
			supported);
	if (speed_ability & HCLGE_SUPPORT_100G_BIT)
		set_bit(ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT,
			supported);
	set_bit(ETHTOOL_LINK_MODE_FIBRE_BIT, supported);
	set_bit(ETHTOOL_LINK_MODE_Pause_BIT, supported);
}
static void hclge_parse_link_mode(struct hclge_dev *hdev, u8 speed_ability)
{
	u8 media_type = hdev->hw.mac.media_type;

	if (media_type != HNAE3_MEDIA_TYPE_FIBER)
		return;

	hclge_parse_fiber_link_mode(hdev, speed_ability);
}
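/* The default MAC address read back from the configuration is split
 * across two parameter words: param[2] carries the low 32 bits and a
 * field of param[3] the high bits. The (x << 31) << 1 below is simply
 * x << 32 written in two steps.
 */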
static void hclge_parse_cfg(struct hclge_cfg *cfg, struct hclge_desc *desc)
{
	struct hclge_cfg_param_cmd *req;
	u64 mac_addr_tmp_high;
	u64 mac_addr_tmp;
	int i;

	req = (struct hclge_cfg_param_cmd *)desc[0].data;

	/* get the configuration */
	cfg->vmdq_vport_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
					      HCLGE_CFG_VMDQ_M,
					      HCLGE_CFG_VMDQ_S);
	cfg->tc_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
				      HCLGE_CFG_TC_NUM_M, HCLGE_CFG_TC_NUM_S);
	cfg->tqp_desc_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
					    HCLGE_CFG_TQP_DESC_N_M,
					    HCLGE_CFG_TQP_DESC_N_S);

	cfg->phy_addr = hnae3_get_field(__le32_to_cpu(req->param[1]),
					HCLGE_CFG_PHY_ADDR_M,
					HCLGE_CFG_PHY_ADDR_S);
	cfg->media_type = hnae3_get_field(__le32_to_cpu(req->param[1]),
					  HCLGE_CFG_MEDIA_TP_M,
					  HCLGE_CFG_MEDIA_TP_S);
	cfg->rx_buf_len = hnae3_get_field(__le32_to_cpu(req->param[1]),
					  HCLGE_CFG_RX_BUF_LEN_M,
					  HCLGE_CFG_RX_BUF_LEN_S);
	/* get mac_address */
	mac_addr_tmp = __le32_to_cpu(req->param[2]);
	mac_addr_tmp_high = hnae3_get_field(__le32_to_cpu(req->param[3]),
					    HCLGE_CFG_MAC_ADDR_H_M,
					    HCLGE_CFG_MAC_ADDR_H_S);

	mac_addr_tmp |= (mac_addr_tmp_high << 31) << 1;

	cfg->default_speed = hnae3_get_field(__le32_to_cpu(req->param[3]),
					     HCLGE_CFG_DEFAULT_SPEED_M,
					     HCLGE_CFG_DEFAULT_SPEED_S);
	cfg->rss_size_max = hnae3_get_field(__le32_to_cpu(req->param[3]),
					    HCLGE_CFG_RSS_SIZE_M,
					    HCLGE_CFG_RSS_SIZE_S);

	for (i = 0; i < ETH_ALEN; i++)
		cfg->mac_addr[i] = (mac_addr_tmp >> (8 * i)) & 0xff;

	req = (struct hclge_cfg_param_cmd *)desc[1].data;
	cfg->numa_node_map = __le32_to_cpu(req->param[0]);

	cfg->speed_ability = hnae3_get_field(__le32_to_cpu(req->param[1]),
					     HCLGE_CFG_SPEED_ABILITY_M,
					     HCLGE_CFG_SPEED_ABILITY_S);
	cfg->umv_space = hnae3_get_field(__le32_to_cpu(req->param[1]),
					 HCLGE_CFG_UMV_TBL_SPACE_M,
					 HCLGE_CFG_UMV_TBL_SPACE_S);
	if (!cfg->umv_space)
		cfg->umv_space = HCLGE_DEFAULT_UMV_SPACE_PER_PF;
}
/* hclge_get_cfg: query static configuration parameters from flash
 * @hdev: pointer to struct hclge_dev
 * @hcfg: the config structure to hold the queried parameters
 */
static int hclge_get_cfg(struct hclge_dev *hdev, struct hclge_cfg *hcfg)
{
	struct hclge_desc desc[HCLGE_PF_CFG_DESC_NUM];
	struct hclge_cfg_param_cmd *req;
	int i, ret;

	for (i = 0; i < HCLGE_PF_CFG_DESC_NUM; i++) {
		u32 offset = 0;

		req = (struct hclge_cfg_param_cmd *)desc[i].data;
		hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_GET_CFG_PARAM,
					   true);
		hnae3_set_field(offset, HCLGE_CFG_OFFSET_M,
				HCLGE_CFG_OFFSET_S, i * HCLGE_CFG_RD_LEN_BYTES);
		/* Len should be in units of 4 bytes when sent to hardware */
		hnae3_set_field(offset, HCLGE_CFG_RD_LEN_M, HCLGE_CFG_RD_LEN_S,
				HCLGE_CFG_RD_LEN_BYTES / HCLGE_CFG_RD_LEN_UNIT);
		req->offset = cpu_to_le32(offset);
	}

	ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_PF_CFG_DESC_NUM);
	if (ret) {
		dev_err(&hdev->pdev->dev, "get config failed %d.\n", ret);
		return ret;
	}

	hclge_parse_cfg(hcfg, desc);

	return 0;
}
static int hclge_get_cap(struct hclge_dev *hdev)
{
	int ret;

	ret = hclge_query_function_status(hdev);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"query function status error %d.\n", ret);
		return ret;
	}

	/* get pf resource */
	ret = hclge_query_pf_resource(hdev);
	if (ret)
		dev_err(&hdev->pdev->dev, "query pf resource error %d.\n", ret);

	return ret;
}
static int hclge_configure(struct hclge_dev *hdev)
{
	struct hclge_cfg cfg;
	int ret, i;

	ret = hclge_get_cfg(hdev, &cfg);
	if (ret) {
		dev_err(&hdev->pdev->dev, "get mac mode error %d.\n", ret);
		return ret;
	}

	hdev->num_vmdq_vport = cfg.vmdq_vport_num;
	hdev->base_tqp_pid = 0;
	hdev->rss_size_max = cfg.rss_size_max;
	hdev->rx_buf_len = cfg.rx_buf_len;
	ether_addr_copy(hdev->hw.mac.mac_addr, cfg.mac_addr);
	hdev->hw.mac.media_type = cfg.media_type;
	hdev->hw.mac.phy_addr = cfg.phy_addr;
	hdev->num_desc = cfg.tqp_desc_num;
	hdev->tm_info.num_pg = 1;
	hdev->tc_max = cfg.tc_num;
	hdev->tm_info.hw_pfc_map = 0;
	hdev->wanted_umv_size = cfg.umv_space;

	ret = hclge_parse_speed(cfg.default_speed, &hdev->hw.mac.speed);
	if (ret) {
		dev_err(&hdev->pdev->dev, "Get wrong speed ret=%d.\n", ret);
		return ret;
	}

	hclge_parse_link_mode(hdev, cfg.speed_ability);

	if ((hdev->tc_max > HNAE3_MAX_TC) ||
	    (hdev->tc_max < 1)) {
		dev_warn(&hdev->pdev->dev, "TC num = %d.\n",
			 hdev->tc_max);
		hdev->tc_max = 1;
	}

	/* Dev does not support DCB */
	if (!hnae3_dev_dcb_supported(hdev)) {
		hdev->tc_max = 1;
		hdev->pfc_max = 0;
	} else {
		hdev->pfc_max = hdev->tc_max;
	}

	hdev->tm_info.num_tc = hdev->tc_max;

	/* Non-contiguous TCs are currently not supported */
	for (i = 0; i < hdev->tm_info.num_tc; i++)
		hnae3_set_bit(hdev->hw_tc_map, i, 1);

	hdev->tx_sch_mode = HCLGE_FLAG_TC_BASE_SCH_MODE;

	return ret;
}
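/* Note: in the function below both the min and the max MSS are packed
 * with the HCLGE_TSO_MSS_MIN_M/HCLGE_TSO_MSS_MIN_S mask/shift pair; the
 * two fields apparently share the same in-word layout.
 */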
static int hclge_config_tso(struct hclge_dev *hdev, int tso_mss_min,
			    int tso_mss_max)
{
	struct hclge_cfg_tso_status_cmd *req;
	struct hclge_desc desc;
	u16 tso_mss;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TSO_GENERIC_CONFIG, false);

	req = (struct hclge_cfg_tso_status_cmd *)desc.data;

	tso_mss = 0;
	hnae3_set_field(tso_mss, HCLGE_TSO_MSS_MIN_M,
			HCLGE_TSO_MSS_MIN_S, tso_mss_min);
	req->tso_mss_min = cpu_to_le16(tso_mss);

	tso_mss = 0;
	hnae3_set_field(tso_mss, HCLGE_TSO_MSS_MIN_M,
			HCLGE_TSO_MSS_MIN_S, tso_mss_max);
	req->tso_mss_max = cpu_to_le16(tso_mss);

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}
static int hclge_config_gro(struct hclge_dev *hdev, bool en)
{
	struct hclge_cfg_gro_status_cmd *req;
	struct hclge_desc desc;
	int ret;

	if (!hnae3_dev_gro_supported(hdev))
		return 0;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GRO_GENERIC_CONFIG, false);
	req = (struct hclge_cfg_gro_status_cmd *)desc.data;

	req->gro_en = cpu_to_le16(en ? 1 : 0);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"GRO hardware config cmd failed, ret = %d\n", ret);

	return ret;
}
static int hclge_alloc_tqps(struct hclge_dev *hdev)
{
	struct hclge_tqp *tqp;
	int i;

	hdev->htqp = devm_kcalloc(&hdev->pdev->dev, hdev->num_tqps,
				  sizeof(struct hclge_tqp), GFP_KERNEL);
	if (!hdev->htqp)
		return -ENOMEM;

	tqp = hdev->htqp;

	for (i = 0; i < hdev->num_tqps; i++) {
		tqp->dev = &hdev->pdev->dev;
		tqp->index = i;

		tqp->q.ae_algo = &ae_algo;
		tqp->q.buf_size = hdev->rx_buf_len;
		tqp->q.desc_num = hdev->num_desc;
		tqp->q.io_base = hdev->hw.io_base + HCLGE_TQP_REG_OFFSET +
			i * HCLGE_TQP_REG_SIZE;

		tqp++;
	}

	return 0;
}
static int hclge_map_tqps_to_func(struct hclge_dev *hdev, u16 func_id,
				  u16 tqp_pid, u16 tqp_vid, bool is_pf)
{
	struct hclge_tqp_map_cmd *req;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SET_TQP_MAP, false);

	req = (struct hclge_tqp_map_cmd *)desc.data;
	req->tqp_id = cpu_to_le16(tqp_pid);
	req->tqp_vf = func_id;
	req->tqp_flag = !is_pf << HCLGE_TQP_MAP_TYPE_B |
			1 << HCLGE_TQP_MAP_EN_B;
	req->tqp_vid = cpu_to_le16(tqp_vid);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev, "TQP map failed %d.\n", ret);

	return ret;
}
static int hclge_assign_tqp(struct hclge_vport *vport)
{
	struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
	struct hclge_dev *hdev = vport->back;
	int i, alloced;

	for (i = 0, alloced = 0; i < hdev->num_tqps &&
	     alloced < kinfo->num_tqps; i++) {
		if (!hdev->htqp[i].alloced) {
			hdev->htqp[i].q.handle = &vport->nic;
			hdev->htqp[i].q.tqp_index = alloced;
			hdev->htqp[i].q.desc_num = kinfo->num_desc;
			kinfo->tqp[alloced] = &hdev->htqp[i].q;
			hdev->htqp[i].alloced = true;
			alloced++;
		}
	}
	vport->alloc_tqps = kinfo->num_tqps;

	return 0;
}
static int hclge_knic_setup(struct hclge_vport *vport,
			    u16 num_tqps, u16 num_desc)
{
	struct hnae3_handle *nic = &vport->nic;
	struct hnae3_knic_private_info *kinfo = &nic->kinfo;
	struct hclge_dev *hdev = vport->back;
	int i, ret;

	kinfo->num_desc = num_desc;
	kinfo->rx_buf_len = hdev->rx_buf_len;
	kinfo->num_tc = min_t(u16, num_tqps, hdev->tm_info.num_tc);
	kinfo->rss_size
		= min_t(u16, hdev->rss_size_max, num_tqps / kinfo->num_tc);
	kinfo->num_tqps = kinfo->rss_size * kinfo->num_tc;

	for (i = 0; i < HNAE3_MAX_TC; i++) {
		if (hdev->hw_tc_map & BIT(i)) {
			kinfo->tc_info[i].enable = true;
			kinfo->tc_info[i].tqp_offset = i * kinfo->rss_size;
			kinfo->tc_info[i].tqp_count = kinfo->rss_size;
			kinfo->tc_info[i].tc = i;
		} else {
			/* Set to the default queue if the TC is disabled */
			kinfo->tc_info[i].enable = false;
			kinfo->tc_info[i].tqp_offset = 0;
			kinfo->tc_info[i].tqp_count = 1;
			kinfo->tc_info[i].tc = 0;
		}
	}

	kinfo->tqp = devm_kcalloc(&hdev->pdev->dev, kinfo->num_tqps,
				  sizeof(struct hnae3_queue *), GFP_KERNEL);
	if (!kinfo->tqp)
		return -ENOMEM;

	ret = hclge_assign_tqp(vport);
	if (ret)
		dev_err(&hdev->pdev->dev, "fail to assign TQPs %d.\n", ret);

	return ret;
}
static int hclge_map_tqp_to_vport(struct hclge_dev *hdev,
				  struct hclge_vport *vport)
{
	struct hnae3_handle *nic = &vport->nic;
	struct hnae3_knic_private_info *kinfo;
	u16 i;

	kinfo = &nic->kinfo;
	for (i = 0; i < kinfo->num_tqps; i++) {
		struct hclge_tqp *q =
			container_of(kinfo->tqp[i], struct hclge_tqp, q);
		bool is_pf;
		int ret;

		is_pf = !(vport->vport_id);
		ret = hclge_map_tqps_to_func(hdev, vport->vport_id, q->index,
					     i, is_pf);
		if (ret)
			return ret;
	}

	return 0;
}
static int hclge_map_tqp(struct hclge_dev *hdev)
{
	struct hclge_vport *vport = hdev->vport;
	u16 i, num_vport;

	num_vport = hdev->num_vmdq_vport + hdev->num_req_vfs + 1;
	for (i = 0; i < num_vport; i++) {
		int ret;

		ret = hclge_map_tqp_to_vport(hdev, vport);
		if (ret)
			return ret;

		vport++;
	}

	return 0;
}
static void hclge_unic_setup(struct hclge_vport *vport, u16 num_tqps)
{
	/* this would be initialized later */
}
static int hclge_vport_setup(struct hclge_vport *vport, u16 num_tqps)
{
	struct hnae3_handle *nic = &vport->nic;
	struct hclge_dev *hdev = vport->back;
	int ret;

	nic->pdev = hdev->pdev;
	nic->ae_algo = &ae_algo;
	nic->numa_node_mask = hdev->numa_node_mask;

	if (hdev->ae_dev->dev_type == HNAE3_DEV_KNIC) {
		ret = hclge_knic_setup(vport, num_tqps, hdev->num_desc);
		if (ret) {
			dev_err(&hdev->pdev->dev, "knic setup failed %d\n",
				ret);
			return ret;
		}
	} else {
		hclge_unic_setup(vport, num_tqps);
	}

	return 0;
}

static int hclge_alloc_vport(struct hclge_dev *hdev)
{
	struct pci_dev *pdev = hdev->pdev;
	struct hclge_vport *vport;
	u32 tqp_main_vport;
	u32 tqp_per_vport;
	int num_vport, i;
	int ret;

	/* We need to alloc a vport for the main NIC of the PF */
	num_vport = hdev->num_vmdq_vport + hdev->num_req_vfs + 1;

	if (hdev->num_tqps < num_vport) {
		dev_err(&hdev->pdev->dev, "tqps(%d) is less than vports(%d)",
			hdev->num_tqps, num_vport);
		return -EINVAL;
	}

	/* Alloc the same number of TQPs for every vport */
	tqp_per_vport = hdev->num_tqps / num_vport;
	tqp_main_vport = tqp_per_vport + hdev->num_tqps % num_vport;

	vport = devm_kcalloc(&pdev->dev, num_vport, sizeof(struct hclge_vport),
			     GFP_KERNEL);
	if (!vport)
		return -ENOMEM;

	hdev->vport = vport;
	hdev->num_alloc_vport = num_vport;

	if (IS_ENABLED(CONFIG_PCI_IOV))
		hdev->num_alloc_vfs = hdev->num_req_vfs;

	for (i = 0; i < num_vport; i++) {
		vport->back = hdev;
		vport->vport_id = i;
		vport->mps = HCLGE_MAC_DEFAULT_FRAME;

		if (i == 0)
			ret = hclge_vport_setup(vport, tqp_main_vport);
		else
			ret = hclge_vport_setup(vport, tqp_per_vport);
		if (ret) {
			dev_err(&pdev->dev,
				"vport setup failed for vport %d, %d\n",
				i, ret);
			return ret;
		}

		vport++;
	}

	return 0;
}
static int hclge_cmd_alloc_tx_buff(struct hclge_dev *hdev,
				   struct hclge_pkt_buf_alloc *buf_alloc)
{
/* TX buffer size is in units of 128 bytes */
#define HCLGE_BUF_SIZE_UNIT_SHIFT	7
#define HCLGE_BUF_SIZE_UPDATE_EN_MSK	BIT(15)
	struct hclge_tx_buff_alloc_cmd *req;
	struct hclge_desc desc;
	int ret;
	u8 i;

	req = (struct hclge_tx_buff_alloc_cmd *)desc.data;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TX_BUFF_ALLOC, 0);
	for (i = 0; i < HCLGE_TC_NUM; i++) {
		u32 buf_size = buf_alloc->priv_buf[i].tx_buf_size;

		req->tx_pkt_buff[i] =
			cpu_to_le16((buf_size >> HCLGE_BUF_SIZE_UNIT_SHIFT) |
				    HCLGE_BUF_SIZE_UPDATE_EN_MSK);
	}

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev, "tx buffer alloc cmd failed %d.\n",
			ret);

	return ret;
}
static int hclge_tx_buffer_alloc(struct hclge_dev *hdev,
				 struct hclge_pkt_buf_alloc *buf_alloc)
{
	int ret = hclge_cmd_alloc_tx_buff(hdev, buf_alloc);

	if (ret)
		dev_err(&hdev->pdev->dev, "tx buffer alloc failed %d\n", ret);

	return ret;
}
static int hclge_get_tc_num(struct hclge_dev *hdev)
{
	int i, cnt = 0;

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
		if (hdev->hw_tc_map & BIT(i))
			cnt++;
	return cnt;
}
static int hclge_get_pfc_enable_num(struct hclge_dev *hdev)
{
	int i, cnt = 0;

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
		if (hdev->hw_tc_map & BIT(i) &&
		    hdev->tm_info.hw_pfc_map & BIT(i))
			cnt++;
	return cnt;
}
/* Get the number of PFC-enabled TCs which have a private buffer */
static int hclge_get_pfc_priv_num(struct hclge_dev *hdev,
				  struct hclge_pkt_buf_alloc *buf_alloc)
{
	struct hclge_priv_buf *priv;
	int i, cnt = 0;

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		priv = &buf_alloc->priv_buf[i];
		if ((hdev->tm_info.hw_pfc_map & BIT(i)) &&
		    priv->enable)
			cnt++;
	}

	return cnt;
}
/* Get the number of PFC-disabled TCs which have a private buffer */
static int hclge_get_no_pfc_priv_num(struct hclge_dev *hdev,
				     struct hclge_pkt_buf_alloc *buf_alloc)
{
	struct hclge_priv_buf *priv;
	int i, cnt = 0;

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		priv = &buf_alloc->priv_buf[i];
		if (hdev->hw_tc_map & BIT(i) &&
		    !(hdev->tm_info.hw_pfc_map & BIT(i)) &&
		    priv->enable)
			cnt++;
	}

	return cnt;
}
static u32 hclge_get_rx_priv_buff_alloced(struct hclge_pkt_buf_alloc *buf_alloc)
{
	struct hclge_priv_buf *priv;
	u32 rx_priv = 0;
	int i;

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		priv = &buf_alloc->priv_buf[i];
		if (priv->enable)
			rx_priv += priv->buf_size;
	}
	return rx_priv;
}
static u32 hclge_get_tx_buff_alloced(struct hclge_pkt_buf_alloc *buf_alloc)
{
	u32 i, total_tx_size = 0;

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
		total_tx_size += buf_alloc->priv_buf[i].tx_buf_size;

	return total_tx_size;
}
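/* Check whether the remaining packet buffer can hold the shared buffer.
 * The shared buffer must cover both a fixed floor (2 * MPS plus the
 * delay value) and the per-TC demand: one MPS for each PFC-enabled TC,
 * half an MPS for every other enabled TC, plus one extra MPS.
 */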
static bool hclge_is_rx_buf_ok(struct hclge_dev *hdev,
			       struct hclge_pkt_buf_alloc *buf_alloc,
			       u32 rx_all)
{
	u32 shared_buf_min, shared_buf_tc, shared_std;
	int tc_num, pfc_enable_num;
	u32 shared_buf, rx_priv;
	int i;

	tc_num = hclge_get_tc_num(hdev);
	pfc_enable_num = hclge_get_pfc_enable_num(hdev);

	if (hnae3_dev_dcb_supported(hdev))
		shared_buf_min = 2 * hdev->mps + HCLGE_DEFAULT_DV;
	else
		shared_buf_min = 2 * hdev->mps + HCLGE_DEFAULT_NON_DCB_DV;

	shared_buf_tc = pfc_enable_num * hdev->mps +
			(tc_num - pfc_enable_num) * hdev->mps / 2 +
			hdev->mps;
	shared_std = max_t(u32, shared_buf_min, shared_buf_tc);

	rx_priv = hclge_get_rx_priv_buff_alloced(buf_alloc);
	if (rx_all <= rx_priv + shared_std)
		return false;

	shared_buf = rx_all - rx_priv;
	buf_alloc->s_buf.buf_size = shared_buf;
	buf_alloc->s_buf.self.high = shared_buf;
	buf_alloc->s_buf.self.low = 2 * hdev->mps;

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		if ((hdev->hw_tc_map & BIT(i)) &&
		    (hdev->tm_info.hw_pfc_map & BIT(i))) {
			buf_alloc->s_buf.tc_thrd[i].low = hdev->mps;
			buf_alloc->s_buf.tc_thrd[i].high = 2 * hdev->mps;
		} else {
			buf_alloc->s_buf.tc_thrd[i].low = 0;
			buf_alloc->s_buf.tc_thrd[i].high = hdev->mps;
		}
	}

	return true;
}
static int hclge_tx_buffer_calc(struct hclge_dev *hdev,
				struct hclge_pkt_buf_alloc *buf_alloc)
{
	u32 i, total_size;

	total_size = hdev->pkt_buf_size;

	/* alloc tx buffer for all enabled tc */
	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];

		if (total_size < HCLGE_DEFAULT_TX_BUF)
			return -ENOMEM;

		if (hdev->hw_tc_map & BIT(i))
			priv->tx_buf_size = HCLGE_DEFAULT_TX_BUF;
		else
			priv->tx_buf_size = 0;

		total_size -= priv->tx_buf_size;
	}

	return 0;
}
/* hclge_rx_buffer_calc: calculate the rx private buffer size for all TCs
 * @hdev: pointer to struct hclge_dev
 * @buf_alloc: pointer to buffer calculation data
 * @return: 0: calculation successful, negative: fail
 */
static int hclge_rx_buffer_calc(struct hclge_dev *hdev,
				struct hclge_pkt_buf_alloc *buf_alloc)
{
#define HCLGE_BUF_SIZE_UNIT	128
	u32 rx_all = hdev->pkt_buf_size, aligned_mps;
	int no_pfc_priv_num, pfc_priv_num;
	struct hclge_priv_buf *priv;
	int i;

	aligned_mps = round_up(hdev->mps, HCLGE_BUF_SIZE_UNIT);
	rx_all -= hclge_get_tx_buff_alloced(buf_alloc);

	/* When DCB is not supported, rx private
	 * buffer is not allocated.
	 */
	if (!hnae3_dev_dcb_supported(hdev)) {
		if (!hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all))
			return -ENOMEM;

		return 0;
	}

	/* step 1, try to alloc private buffer for all enabled tc */
	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		priv = &buf_alloc->priv_buf[i];
		if (hdev->hw_tc_map & BIT(i)) {
			priv->enable = 1;
			if (hdev->tm_info.hw_pfc_map & BIT(i)) {
				priv->wl.low = aligned_mps;
				priv->wl.high = priv->wl.low + aligned_mps;
				priv->buf_size = priv->wl.high +
						 HCLGE_DEFAULT_DV;
			} else {
				priv->wl.low = 0;
				priv->wl.high = 2 * aligned_mps;
				priv->buf_size = priv->wl.high;
			}
		} else {
			priv->enable = 0;
			priv->wl.low = 0;
			priv->wl.high = 0;
			priv->buf_size = 0;
		}
	}

	if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all))
		return 0;

	/* step 2, try to decrease the buffer size of
	 * no pfc TC's private buffer
	 */
	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		priv = &buf_alloc->priv_buf[i];

		priv->enable = 0;
		priv->wl.low = 0;
		priv->wl.high = 0;
		priv->buf_size = 0;

		if (!(hdev->hw_tc_map & BIT(i)))
			continue;

		priv->enable = 1;

		if (hdev->tm_info.hw_pfc_map & BIT(i)) {
			priv->wl.low = HCLGE_BUF_SIZE_UNIT;
			priv->wl.high = priv->wl.low + aligned_mps;
			priv->buf_size = priv->wl.high + HCLGE_DEFAULT_DV;
		} else {
			priv->wl.low = 0;
			priv->wl.high = aligned_mps;
			priv->buf_size = priv->wl.high;
		}
	}

	if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all))
		return 0;

	/* step 3, try to reduce the number of pfc disabled TCs,
	 * which have private buffer
	 */
	/* get the total no pfc enable TC number, which have private buffer */
	no_pfc_priv_num = hclge_get_no_pfc_priv_num(hdev, buf_alloc);

	/* let the last to be cleared first */
	for (i = HCLGE_MAX_TC_NUM - 1; i >= 0; i--) {
		priv = &buf_alloc->priv_buf[i];

		if (hdev->hw_tc_map & BIT(i) &&
		    !(hdev->tm_info.hw_pfc_map & BIT(i))) {
			/* Clear the no pfc TC private buffer */
			priv->wl.low = 0;
			priv->wl.high = 0;
			priv->buf_size = 0;
			priv->enable = 0;
			no_pfc_priv_num--;
		}

		if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all) ||
		    no_pfc_priv_num == 0)
			break;
	}

	if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all))
		return 0;

	/* step 4, try to reduce the number of pfc enabled TCs
	 * which have private buffer.
	 */
	pfc_priv_num = hclge_get_pfc_priv_num(hdev, buf_alloc);

	/* let the last to be cleared first */
	for (i = HCLGE_MAX_TC_NUM - 1; i >= 0; i--) {
		priv = &buf_alloc->priv_buf[i];

		if (hdev->hw_tc_map & BIT(i) &&
		    hdev->tm_info.hw_pfc_map & BIT(i)) {
			/* Reduce the number of pfc TCs with private buffer */
			priv->wl.low = 0;
			priv->enable = 0;
			priv->wl.high = 0;
			priv->buf_size = 0;
			pfc_priv_num--;
		}

		if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all) ||
		    pfc_priv_num == 0)
			break;
	}
	if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all))
		return 0;

	return -ENOMEM;
}
static int hclge_rx_priv_buf_alloc(struct hclge_dev *hdev,
				   struct hclge_pkt_buf_alloc *buf_alloc)
{
	struct hclge_rx_priv_buff_cmd *req;
	struct hclge_desc desc;
	int ret;
	int i;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_PRIV_BUFF_ALLOC, false);
	req = (struct hclge_rx_priv_buff_cmd *)desc.data;

	/* Alloc private buffer TCs */
	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];

		req->buf_num[i] =
			cpu_to_le16(priv->buf_size >> HCLGE_BUF_UNIT_S);
		req->buf_num[i] |=
			cpu_to_le16(1 << HCLGE_TC0_PRI_BUF_EN_B);
	}

	req->shared_buf =
		cpu_to_le16((buf_alloc->s_buf.buf_size >> HCLGE_BUF_UNIT_S) |
			    (1 << HCLGE_TC0_PRI_BUF_EN_B));

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"rx private buffer alloc cmd failed %d\n", ret);

	return ret;
}
static int hclge_rx_priv_wl_config(struct hclge_dev *hdev,
				   struct hclge_pkt_buf_alloc *buf_alloc)
{
	struct hclge_rx_priv_wl_buf *req;
	struct hclge_priv_buf *priv;
	struct hclge_desc desc[2];
	int i, j;
	int ret;

	for (i = 0; i < 2; i++) {
		hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_RX_PRIV_WL_ALLOC,
					   false);
		req = (struct hclge_rx_priv_wl_buf *)desc[i].data;

		/* The first descriptor sets the NEXT bit to 1 */
		if (i == 0)
			desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
		else
			desc[i].flag &= ~cpu_to_le16(HCLGE_CMD_FLAG_NEXT);

		for (j = 0; j < HCLGE_TC_NUM_ONE_DESC; j++) {
			u32 idx = i * HCLGE_TC_NUM_ONE_DESC + j;

			priv = &buf_alloc->priv_buf[idx];
			req->tc_wl[j].high =
				cpu_to_le16(priv->wl.high >> HCLGE_BUF_UNIT_S);
			req->tc_wl[j].high |=
				cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
			req->tc_wl[j].low =
				cpu_to_le16(priv->wl.low >> HCLGE_BUF_UNIT_S);
			req->tc_wl[j].low |=
				cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
		}
	}

	/* Send 2 descriptors at one time */
	ret = hclge_cmd_send(&hdev->hw, desc, 2);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"rx private waterline config cmd failed %d\n",
			ret);
	return ret;
}
static int hclge_common_thrd_config(struct hclge_dev *hdev,
				    struct hclge_pkt_buf_alloc *buf_alloc)
{
	struct hclge_shared_buf *s_buf = &buf_alloc->s_buf;
	struct hclge_rx_com_thrd *req;
	struct hclge_desc desc[2];
	struct hclge_tc_thrd *tc;
	int i, j;
	int ret;

	for (i = 0; i < 2; i++) {
		hclge_cmd_setup_basic_desc(&desc[i],
					   HCLGE_OPC_RX_COM_THRD_ALLOC, false);
		req = (struct hclge_rx_com_thrd *)&desc[i].data;

		/* The first descriptor sets the NEXT bit to 1 */
		if (i == 0)
			desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
		else
			desc[i].flag &= ~cpu_to_le16(HCLGE_CMD_FLAG_NEXT);

		for (j = 0; j < HCLGE_TC_NUM_ONE_DESC; j++) {
			tc = &s_buf->tc_thrd[i * HCLGE_TC_NUM_ONE_DESC + j];

			req->com_thrd[j].high =
				cpu_to_le16(tc->high >> HCLGE_BUF_UNIT_S);
			req->com_thrd[j].high |=
				cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
			req->com_thrd[j].low =
				cpu_to_le16(tc->low >> HCLGE_BUF_UNIT_S);
			req->com_thrd[j].low |=
				cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
		}
	}

	/* Send 2 descriptors at one time */
	ret = hclge_cmd_send(&hdev->hw, desc, 2);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"common threshold config cmd failed %d\n", ret);
	return ret;
}
static int hclge_common_wl_config(struct hclge_dev *hdev,
				  struct hclge_pkt_buf_alloc *buf_alloc)
{
	struct hclge_shared_buf *buf = &buf_alloc->s_buf;
	struct hclge_rx_com_wl *req;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_COM_WL_ALLOC, false);

	req = (struct hclge_rx_com_wl *)desc.data;
	req->com_wl.high = cpu_to_le16(buf->self.high >> HCLGE_BUF_UNIT_S);
	req->com_wl.high |= cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));

	req->com_wl.low = cpu_to_le16(buf->self.low >> HCLGE_BUF_UNIT_S);
	req->com_wl.low |= cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"common waterline config cmd failed %d\n", ret);

	return ret;
}
int hclge_buffer_alloc(struct hclge_dev *hdev)
{
	struct hclge_pkt_buf_alloc *pkt_buf;
	int ret;

	pkt_buf = kzalloc(sizeof(*pkt_buf), GFP_KERNEL);
	if (!pkt_buf)
		return -ENOMEM;

	ret = hclge_tx_buffer_calc(hdev, pkt_buf);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"could not calc tx buffer size for all TCs %d\n", ret);
		goto out;
	}

	ret = hclge_tx_buffer_alloc(hdev, pkt_buf);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"could not alloc tx buffers %d\n", ret);
		goto out;
	}

	ret = hclge_rx_buffer_calc(hdev, pkt_buf);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"could not calc rx priv buffer size for all TCs %d\n",
			ret);
		goto out;
	}

	ret = hclge_rx_priv_buf_alloc(hdev, pkt_buf);
	if (ret) {
		dev_err(&hdev->pdev->dev, "could not alloc rx priv buffer %d\n",
			ret);
		goto out;
	}

	if (hnae3_dev_dcb_supported(hdev)) {
		ret = hclge_rx_priv_wl_config(hdev, pkt_buf);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"could not configure rx private waterline %d\n",
				ret);
			goto out;
		}

		ret = hclge_common_thrd_config(hdev, pkt_buf);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"could not configure common threshold %d\n",
				ret);
			goto out;
		}
	}

	ret = hclge_common_wl_config(hdev, pkt_buf);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"could not configure common waterline %d\n", ret);

out:
	kfree(pkt_buf);
	return ret;
}
static int hclge_init_roce_base_info(struct hclge_vport *vport)
{
	struct hnae3_handle *roce = &vport->roce;
	struct hnae3_handle *nic = &vport->nic;

	roce->rinfo.num_vectors = vport->back->num_roce_msi;

	if (vport->back->num_msi_left < vport->roce.rinfo.num_vectors ||
	    vport->back->num_msi_left == 0)
		return -EINVAL;

	roce->rinfo.base_vector = vport->back->roce_base_vector;

	roce->rinfo.netdev = nic->kinfo.netdev;
	roce->rinfo.roce_io_base = vport->back->hw.io_base;

	roce->pdev = nic->pdev;
	roce->ae_algo = nic->ae_algo;
	roce->numa_node_mask = nic->numa_node_mask;

	return 0;
}
static int hclge_init_msi(struct hclge_dev *hdev)
{
	struct pci_dev *pdev = hdev->pdev;
	int vectors;
	int i;

	vectors = pci_alloc_irq_vectors(pdev, 1, hdev->num_msi,
					PCI_IRQ_MSI | PCI_IRQ_MSIX);
	if (vectors < 0) {
		dev_err(&pdev->dev,
			"failed(%d) to allocate MSI/MSI-X vectors\n",
			vectors);
		return vectors;
	}
	if (vectors < hdev->num_msi)
		dev_warn(&hdev->pdev->dev,
			 "requested %d MSI/MSI-X, but allocated %d MSI/MSI-X\n",
			 hdev->num_msi, vectors);

	hdev->num_msi = vectors;
	hdev->num_msi_left = vectors;
	hdev->base_msi_vector = pdev->irq;
	hdev->roce_base_vector = hdev->base_msi_vector +
				 hdev->roce_base_msix_offset;

	hdev->vector_status = devm_kcalloc(&pdev->dev, hdev->num_msi,
					   sizeof(u16), GFP_KERNEL);
	if (!hdev->vector_status) {
		pci_free_irq_vectors(pdev);
		return -ENOMEM;
	}

	for (i = 0; i < hdev->num_msi; i++)
		hdev->vector_status[i] = HCLGE_INVALID_VPORT;

	hdev->vector_irq = devm_kcalloc(&pdev->dev, hdev->num_msi,
					sizeof(int), GFP_KERNEL);
	if (!hdev->vector_irq) {
		pci_free_irq_vectors(pdev);
		return -ENOMEM;
	}

	return 0;
}
static u8 hclge_check_speed_dup(u8 duplex, int speed)
{
	if (!(speed == HCLGE_MAC_SPEED_10M || speed == HCLGE_MAC_SPEED_100M))
		duplex = HCLGE_MAC_FULL;

	return duplex;
}
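/* Program the MAC speed and duplex. The values written to the SPEED
 * field below (1G=0, 10G=1, 25G=2, 40G=3, 50G=4, 100G=5, 10M=6, 100M=7)
 * are the firmware encoding; hclge_parse_speed() performs the inverse
 * mapping when the value is read back.
 */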
static int hclge_cfg_mac_speed_dup_hw(struct hclge_dev *hdev, int speed,
				      u8 duplex)
{
	struct hclge_config_mac_speed_dup_cmd *req;
	struct hclge_desc desc;
	int ret;

	req = (struct hclge_config_mac_speed_dup_cmd *)desc.data;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_SPEED_DUP, false);

	hnae3_set_bit(req->speed_dup, HCLGE_CFG_DUPLEX_B, !!duplex);

	switch (speed) {
	case HCLGE_MAC_SPEED_10M:
		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
				HCLGE_CFG_SPEED_S, 6);
		break;
	case HCLGE_MAC_SPEED_100M:
		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
				HCLGE_CFG_SPEED_S, 7);
		break;
	case HCLGE_MAC_SPEED_1G:
		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
				HCLGE_CFG_SPEED_S, 0);
		break;
	case HCLGE_MAC_SPEED_10G:
		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
				HCLGE_CFG_SPEED_S, 1);
		break;
	case HCLGE_MAC_SPEED_25G:
		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
				HCLGE_CFG_SPEED_S, 2);
		break;
	case HCLGE_MAC_SPEED_40G:
		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
				HCLGE_CFG_SPEED_S, 3);
		break;
	case HCLGE_MAC_SPEED_50G:
		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
				HCLGE_CFG_SPEED_S, 4);
		break;
	case HCLGE_MAC_SPEED_100G:
		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
				HCLGE_CFG_SPEED_S, 5);
		break;
	default:
		dev_err(&hdev->pdev->dev, "invalid speed (%d)\n", speed);
		return -EINVAL;
	}

	hnae3_set_bit(req->mac_change_fec_en, HCLGE_CFG_MAC_SPEED_CHANGE_EN_B,
		      1);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"mac speed/duplex config cmd failed %d.\n", ret);
		return ret;
	}

	return 0;
}
int hclge_cfg_mac_speed_dup(struct hclge_dev *hdev, int speed, u8 duplex)
{
	int ret;

	duplex = hclge_check_speed_dup(duplex, speed);
	if (hdev->hw.mac.speed == speed && hdev->hw.mac.duplex == duplex)
		return 0;

	ret = hclge_cfg_mac_speed_dup_hw(hdev, speed, duplex);
	if (ret)
		return ret;

	hdev->hw.mac.speed = speed;
	hdev->hw.mac.duplex = duplex;

	return 0;
}
static int hclge_cfg_mac_speed_dup_h(struct hnae3_handle *handle, int speed,
				     u8 duplex)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	return hclge_cfg_mac_speed_dup(hdev, speed, duplex);
}
static int hclge_query_mac_an_speed_dup(struct hclge_dev *hdev, int *speed,
					u8 *duplex)
{
	struct hclge_query_an_speed_dup_cmd *req;
	struct hclge_desc desc;
	int speed_tmp;
	int ret;

	req = (struct hclge_query_an_speed_dup_cmd *)desc.data;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_AN_RESULT, true);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"mac speed/autoneg/duplex query cmd failed %d\n",
			ret);
		return ret;
	}

	*duplex = hnae3_get_bit(req->an_syn_dup_speed, HCLGE_QUERY_DUPLEX_B);
	speed_tmp = hnae3_get_field(req->an_syn_dup_speed, HCLGE_QUERY_SPEED_M,
				    HCLGE_QUERY_SPEED_S);

	ret = hclge_parse_speed(speed_tmp, speed);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"could not parse speed(=%d), %d\n", speed_tmp, ret);

	return ret;
}
static int hclge_set_autoneg_en(struct hclge_dev *hdev, bool enable)
{
	struct hclge_config_auto_neg_cmd *req;
	struct hclge_desc desc;
	u32 flag = 0;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_AN_MODE, false);

	req = (struct hclge_config_auto_neg_cmd *)desc.data;
	hnae3_set_bit(flag, HCLGE_MAC_CFG_AN_EN_B, !!enable);
	req->cfg_an_cmd_flag = cpu_to_le32(flag);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev, "auto neg set cmd failed %d.\n",
			ret);

	return ret;
}
static int hclge_set_autoneg(struct hnae3_handle *handle, bool enable)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	return hclge_set_autoneg_en(hdev, enable);
}
static int hclge_get_autoneg(struct hnae3_handle *handle)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	struct phy_device *phydev = hdev->hw.mac.phydev;

	if (phydev)
		return phydev->autoneg;

	return hdev->hw.mac.autoneg;
}
static int hclge_mac_init(struct hclge_dev *hdev)
{
	struct hclge_mac *mac = &hdev->hw.mac;
	int ret;

	hdev->hw.mac.duplex = HCLGE_MAC_FULL;
	ret = hclge_cfg_mac_speed_dup_hw(hdev, hdev->hw.mac.speed,
					 hdev->hw.mac.duplex);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Config mac speed dup fail ret=%d\n", ret);
		return ret;
	}

	mac->link = 0;

	ret = hclge_set_mac_mtu(hdev, hdev->mps);
	if (ret) {
		dev_err(&hdev->pdev->dev, "set mtu failed ret=%d\n", ret);
		return ret;
	}

	ret = hclge_buffer_alloc(hdev);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"allocate buffer fail, ret=%d\n", ret);

	return ret;
}
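/* The schedule helpers below use test_and_set_bit() on the per-device
 * state so that each work item is queued at most once until its handler
 * clears the corresponding SCHED bit again.
 */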
static void hclge_mbx_task_schedule(struct hclge_dev *hdev)
{
	if (!test_and_set_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state))
		schedule_work(&hdev->mbx_service_task);
}

static void hclge_reset_task_schedule(struct hclge_dev *hdev)
{
	if (!test_and_set_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state))
		schedule_work(&hdev->rst_service_task);
}

static void hclge_task_schedule(struct hclge_dev *hdev)
{
	if (!test_bit(HCLGE_STATE_DOWN, &hdev->state) &&
	    !test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
	    !test_and_set_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state))
		(void)schedule_work(&hdev->service_task);
}
static int hclge_get_mac_link_status(struct hclge_dev *hdev)
{
	struct hclge_link_status_cmd *req;
	struct hclge_desc desc;
	int link_status;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_LINK_STATUS, true);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev, "get link status cmd failed %d\n",
			ret);
		return ret;
	}

	req = (struct hclge_link_status_cmd *)desc.data;
	link_status = req->status & HCLGE_LINK_STATUS_UP_M;

	return !!link_status;
}
static int hclge_get_mac_phy_link(struct hclge_dev *hdev)
{
	int mac_state;
	int link_stat;

	if (test_bit(HCLGE_STATE_DOWN, &hdev->state))
		return 0;

	mac_state = hclge_get_mac_link_status(hdev);

	if (hdev->hw.mac.phydev) {
		if (hdev->hw.mac.phydev->state == PHY_RUNNING)
			link_stat = mac_state &
				hdev->hw.mac.phydev->link;
		else
			link_stat = 0;
	} else {
		link_stat = mac_state;
	}

	return !!link_stat;
}
static void hclge_update_link_status(struct hclge_dev *hdev)
{
	struct hnae3_client *client = hdev->nic_client;
	struct hnae3_handle *handle;
	int state;
	int i;

	if (!client)
		return;
	state = hclge_get_mac_phy_link(hdev);
	if (state != hdev->hw.mac.link) {
		for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
			handle = &hdev->vport[i].nic;
			client->ops->link_status_change(handle, state);
		}
		hdev->hw.mac.link = state;
	}
}
static int hclge_update_speed_duplex(struct hclge_dev *hdev)
{
	struct hclge_mac mac = hdev->hw.mac;
	int speed;
	u8 duplex;
	int ret;

	/* get the speed and duplex as the autoneg result from the mac cmd
	 * when a phy doesn't exist.
	 */
	if (mac.phydev || !mac.autoneg)
		return 0;

	ret = hclge_query_mac_an_speed_dup(hdev, &speed, &duplex);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"mac autoneg/speed/duplex query failed %d\n", ret);
		return ret;
	}

	ret = hclge_cfg_mac_speed_dup(hdev, speed, duplex);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"mac speed/duplex config failed %d\n", ret);
		return ret;
	}

	return 0;
}
static int hclge_update_speed_duplex_h(struct hnae3_handle *handle)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	return hclge_update_speed_duplex(hdev);
}

static int hclge_get_status(struct hnae3_handle *handle)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	hclge_update_link_status(hdev);

	return hdev->hw.mac.link;
}
static void hclge_service_timer(struct timer_list *t)
{
	struct hclge_dev *hdev = from_timer(hdev, t, service_timer);

	mod_timer(&hdev->service_timer, jiffies + HZ);
	hdev->hw_stats.stats_timer++;
	hclge_task_schedule(hdev);
}

static void hclge_service_complete(struct hclge_dev *hdev)
{
	WARN_ON(!test_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state));

	/* Flush memory before next watchdog */
	smp_mb__before_atomic();
	clear_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state);
}
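/* Decode the vector 0 interrupt source. Reset events are checked first
 * (IMP, then global, then core), then MSI-X reported hardware errors,
 * and mailbox (CMDQ RX) events last, matching the priority described in
 * the comment below.
 */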
static u32 hclge_check_event_cause(struct hclge_dev *hdev, u32 *clearval)
{
	u32 rst_src_reg, cmdq_src_reg, msix_src_reg;

	/* fetch the events from their corresponding regs */
	rst_src_reg = hclge_read_dev(&hdev->hw, HCLGE_MISC_VECTOR_INT_STS);
	cmdq_src_reg = hclge_read_dev(&hdev->hw, HCLGE_VECTOR0_CMDQ_SRC_REG);
	msix_src_reg = hclge_read_dev(&hdev->hw,
				      HCLGE_VECTOR0_PF_OTHER_INT_STS_REG);

	/* Assumption: If by any chance reset and mailbox events are reported
	 * together then we will only process the reset event in this go and
	 * will defer the processing of the mailbox events. Since we would
	 * not have cleared the RX CMDQ event this time, we would receive
	 * another interrupt from H/W just for the mailbox.
	 */

	/* check for vector0 reset event sources */
	if (BIT(HCLGE_VECTOR0_IMPRESET_INT_B) & rst_src_reg) {
		dev_info(&hdev->pdev->dev, "IMP reset interrupt\n");
		set_bit(HNAE3_IMP_RESET, &hdev->reset_pending);
		set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
		*clearval = BIT(HCLGE_VECTOR0_IMPRESET_INT_B);
		return HCLGE_VECTOR0_EVENT_RST;
	}

	if (BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B) & rst_src_reg) {
		dev_info(&hdev->pdev->dev, "global reset interrupt\n");
		set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
		set_bit(HNAE3_GLOBAL_RESET, &hdev->reset_pending);
		*clearval = BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B);
		return HCLGE_VECTOR0_EVENT_RST;
	}

	if (BIT(HCLGE_VECTOR0_CORERESET_INT_B) & rst_src_reg) {
		dev_info(&hdev->pdev->dev, "core reset interrupt\n");
		set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
		set_bit(HNAE3_CORE_RESET, &hdev->reset_pending);
		*clearval = BIT(HCLGE_VECTOR0_CORERESET_INT_B);
		return HCLGE_VECTOR0_EVENT_RST;
	}

	/* check for vector0 msix event source */
	if (msix_src_reg & HCLGE_VECTOR0_REG_MSIX_MASK)
		return HCLGE_VECTOR0_EVENT_ERR;

	/* check for vector0 mailbox(=CMDQ RX) event source */
	if (BIT(HCLGE_VECTOR0_RX_CMDQ_INT_B) & cmdq_src_reg) {
		cmdq_src_reg &= ~BIT(HCLGE_VECTOR0_RX_CMDQ_INT_B);
		*clearval = cmdq_src_reg;
		return HCLGE_VECTOR0_EVENT_MBX;
	}

	return HCLGE_VECTOR0_EVENT_OTHER;
}
static void hclge_clear_event_cause(struct hclge_dev *hdev, u32 event_type,
				    u32 regclr)
{
	switch (event_type) {
	case HCLGE_VECTOR0_EVENT_RST:
		hclge_write_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG, regclr);
		break;
	case HCLGE_VECTOR0_EVENT_MBX:
		hclge_write_dev(&hdev->hw, HCLGE_VECTOR0_CMDQ_SRC_REG, regclr);
		break;
	default:
		break;
	}
}

static void hclge_clear_all_event_cause(struct hclge_dev *hdev)
{
	hclge_clear_event_cause(hdev, HCLGE_VECTOR0_EVENT_RST,
				BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B) |
				BIT(HCLGE_VECTOR0_CORERESET_INT_B) |
				BIT(HCLGE_VECTOR0_IMPRESET_INT_B));
	hclge_clear_event_cause(hdev, HCLGE_VECTOR0_EVENT_MBX, 0);
}

static void hclge_enable_vector(struct hclge_misc_vector *vector, bool enable)
{
	writel(enable ? 1 : 0, vector->addr);
}
static irqreturn_t hclge_misc_irq_handle(int irq, void *data)
{
	struct hclge_dev *hdev = data;
	u32 event_cause;
	u32 clearval;

	hclge_enable_vector(&hdev->misc_vector, false);
	event_cause = hclge_check_event_cause(hdev, &clearval);

	/* vector 0 interrupt is shared with reset and mailbox source events. */
	switch (event_cause) {
	case HCLGE_VECTOR0_EVENT_ERR:
		/* we do not know what type of reset is required now. This
		 * could only be decided after we fetch the type of errors
		 * which caused this event. Therefore, we will do below for
		 * now:
		 * 1. Assert HNAE3_UNKNOWN_RESET type of reset. This means we
		 *    have a deferred type of reset to be used.
		 * 2. Schedule the reset service task.
		 * 3. When the service task receives HNAE3_UNKNOWN_RESET type
		 *    it will fetch the correct type of reset. This would be
		 *    done by first decoding the types of errors.
		 */
		set_bit(HNAE3_UNKNOWN_RESET, &hdev->reset_request);
		/* fall through */
	case HCLGE_VECTOR0_EVENT_RST:
		hclge_reset_task_schedule(hdev);
		break;
	case HCLGE_VECTOR0_EVENT_MBX:
		/* If we are here then,
		 * 1. Either we are not handling any mbx task and we are not
		 *    scheduled as well
		 *                        OR
		 * 2. We could be handling a mbx task but nothing more is
		 *    scheduled.
		 * In both cases, we should schedule the mbx task as there are
		 * more mbx messages reported by this interrupt.
		 */
		hclge_mbx_task_schedule(hdev);
		break;
	default:
		dev_warn(&hdev->pdev->dev,
			 "received unknown or unhandled event of vector0\n");
		break;
	}

	/* clear the source of interrupt if it is not caused by reset */
	if (event_cause == HCLGE_VECTOR0_EVENT_MBX) {
		hclge_clear_event_cause(hdev, event_cause, clearval);
		hclge_enable_vector(&hdev->misc_vector, true);
	}

	return IRQ_HANDLED;
}
static void hclge_free_vector(struct hclge_dev *hdev, int vector_id)
{
	if (hdev->vector_status[vector_id] == HCLGE_INVALID_VPORT) {
		dev_warn(&hdev->pdev->dev,
			 "vector(vector_id %d) has been freed.\n", vector_id);
		return;
	}

	hdev->vector_status[vector_id] = HCLGE_INVALID_VPORT;
	hdev->num_msi_left += 1;
	hdev->num_msi_used -= 1;
}

static void hclge_get_misc_vector(struct hclge_dev *hdev)
{
	struct hclge_misc_vector *vector = &hdev->misc_vector;

	vector->vector_irq = pci_irq_vector(hdev->pdev, 0);

	vector->addr = hdev->hw.io_base + HCLGE_MISC_VECTOR_REG_BASE;
	hdev->vector_status[0] = 0;

	hdev->num_msi_left -= 1;
	hdev->num_msi_used += 1;
}
static int hclge_misc_irq_init(struct hclge_dev *hdev)
{
	int ret;

	hclge_get_misc_vector(hdev);

	/* this would be explicitly freed in the end */
	ret = request_irq(hdev->misc_vector.vector_irq, hclge_misc_irq_handle,
			  0, "hclge_misc", hdev);
	if (ret) {
		hclge_free_vector(hdev, 0);
		dev_err(&hdev->pdev->dev, "request misc irq(%d) fail\n",
			hdev->misc_vector.vector_irq);
	}

	return ret;
}

static void hclge_misc_irq_uninit(struct hclge_dev *hdev)
{
	free_irq(hdev->misc_vector.vector_irq, hdev);
	hclge_free_vector(hdev, 0);
}
static int hclge_notify_client(struct hclge_dev *hdev,
			       enum hnae3_reset_notify_type type)
{
	struct hnae3_client *client = hdev->nic_client;
	u16 i;

	if (!client->ops->reset_notify)
		return -EOPNOTSUPP;

	for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
		struct hnae3_handle *handle = &hdev->vport[i].nic;
		int ret;

		ret = client->ops->reset_notify(handle, type);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"notify nic client failed %d(%d)\n",
				type, ret);
			return ret;
		}
	}

	return 0;
}

static int hclge_notify_roce_client(struct hclge_dev *hdev,
				    enum hnae3_reset_notify_type type)
{
	struct hnae3_client *client = hdev->roce_client;
	int ret = 0;
	u16 i;

	if (!client)
		return 0;

	if (!client->ops->reset_notify)
		return -EOPNOTSUPP;

	for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
		struct hnae3_handle *handle = &hdev->vport[i].roce;

		ret = client->ops->reset_notify(handle, type);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"notify roce client failed %d(%d)",
				type, ret);
			return ret;
		}
	}

	return ret;
}
static int hclge_reset_wait(struct hclge_dev *hdev)
{
#define HCLGE_RESET_WAIT_MS	100
#define HCLGE_RESET_WAIT_CNT	200
	u32 val, reg, reg_bit;
	u32 cnt = 0;

	switch (hdev->reset_type) {
	case HNAE3_IMP_RESET:
		reg = HCLGE_GLOBAL_RESET_REG;
		reg_bit = HCLGE_IMP_RESET_BIT;
		break;
	case HNAE3_GLOBAL_RESET:
		reg = HCLGE_GLOBAL_RESET_REG;
		reg_bit = HCLGE_GLOBAL_RESET_BIT;
		break;
	case HNAE3_CORE_RESET:
		reg = HCLGE_GLOBAL_RESET_REG;
		reg_bit = HCLGE_CORE_RESET_BIT;
		break;
	case HNAE3_FUNC_RESET:
		reg = HCLGE_FUN_RST_ING;
		reg_bit = HCLGE_FUN_RST_ING_B;
		break;
	case HNAE3_FLR_RESET:
		break;
	default:
		dev_err(&hdev->pdev->dev,
			"Wait for unsupported reset type: %d\n",
			hdev->reset_type);
		return -EINVAL;
	}

	if (hdev->reset_type == HNAE3_FLR_RESET) {
		while (!test_bit(HNAE3_FLR_DONE, &hdev->flr_state) &&
		       cnt++ < HCLGE_RESET_WAIT_CNT)
			msleep(HCLGE_RESET_WAIT_MS);

		if (!test_bit(HNAE3_FLR_DONE, &hdev->flr_state)) {
			dev_err(&hdev->pdev->dev,
				"flr wait timeout: %d\n", cnt);
			return -EBUSY;
		}

		return 0;
	}

	val = hclge_read_dev(&hdev->hw, reg);
	while (hnae3_get_bit(val, reg_bit) && cnt < HCLGE_RESET_WAIT_CNT) {
		msleep(HCLGE_RESET_WAIT_MS);
		val = hclge_read_dev(&hdev->hw, reg);
		cnt++;
	}

	if (cnt >= HCLGE_RESET_WAIT_CNT) {
		dev_warn(&hdev->pdev->dev,
			 "Wait for reset timeout: %d\n", hdev->reset_type);
		return -EBUSY;
	}

	return 0;
}
static int hclge_set_vf_rst(struct hclge_dev *hdev, int func_id, bool reset)
{
	struct hclge_vf_rst_cmd *req;
	struct hclge_desc desc;

	req = (struct hclge_vf_rst_cmd *)desc.data;
	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GBL_RST_STATUS, false);
	req->dest_vfid = func_id;

	if (reset)
		req->vf_rst = 0x1;

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}
int hclge_set_all_vf_rst(struct hclge_dev *hdev, bool reset)
{
	int i;

	for (i = hdev->num_vmdq_vport + 1; i < hdev->num_alloc_vport; i++) {
		struct hclge_vport *vport = &hdev->vport[i];
		int ret;

		/* Send cmd to set/clear VF's FUNC_RST_ING */
		ret = hclge_set_vf_rst(hdev, vport->vport_id, reset);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"set vf(%d) rst failed %d!\n",
				vport->vport_id, ret);
			return ret;
		}

		if (!reset)
			continue;

		/* Inform VF to process the reset.
		 * hclge_inform_reset_assert_to_vf may fail if VF
		 * driver is not loaded.
		 */
		ret = hclge_inform_reset_assert_to_vf(vport);
		if (ret)
			dev_warn(&hdev->pdev->dev,
				 "inform reset to vf(%d) failed %d!\n",
				 vport->vport_id, ret);
	}

	return 0;
}
int hclge_func_reset_cmd(struct hclge_dev *hdev, int func_id)
{
	struct hclge_desc desc;
	struct hclge_reset_cmd *req = (struct hclge_reset_cmd *)desc.data;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_RST_TRIGGER, false);
	hnae3_set_bit(req->mac_func_reset, HCLGE_CFG_RESET_FUNC_B, 1);
	req->fun_reset_vfid = func_id;

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"send function reset cmd fail, status = %d\n", ret);

	return ret;
}
static void hclge_do_reset(struct hclge_dev *hdev)
{
	struct pci_dev *pdev = hdev->pdev;
	u32 val;

	switch (hdev->reset_type) {
	case HNAE3_GLOBAL_RESET:
		val = hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG);
		hnae3_set_bit(val, HCLGE_GLOBAL_RESET_BIT, 1);
		hclge_write_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG, val);
		dev_info(&pdev->dev, "Global Reset requested\n");
		break;
	case HNAE3_CORE_RESET:
		val = hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG);
		hnae3_set_bit(val, HCLGE_CORE_RESET_BIT, 1);
		hclge_write_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG, val);
		dev_info(&pdev->dev, "Core Reset requested\n");
		break;
	case HNAE3_FUNC_RESET:
		dev_info(&pdev->dev, "PF Reset requested\n");
		/* schedule again to check later */
		set_bit(HNAE3_FUNC_RESET, &hdev->reset_pending);
		hclge_reset_task_schedule(hdev);
		break;
	case HNAE3_FLR_RESET:
		dev_info(&pdev->dev, "FLR requested\n");
		/* schedule again to check later */
		set_bit(HNAE3_FLR_RESET, &hdev->reset_pending);
		hclge_reset_task_schedule(hdev);
		break;
	default:
		dev_warn(&pdev->dev,
			 "Unsupported reset type: %d\n", hdev->reset_type);
		break;
	}
}
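
/* Reset requests are collected as bits in a single unsigned long
 * (reset_pending / reset_request / default_reset_request).
 * hclge_get_reset_level() below picks the highest-priority level that is
 * set (IMP, then global, core, func, FLR) and clears that bit together
 * with every lower-priority bit it supersedes, so one pass services the
 * strongest outstanding request.
 */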
static enum hnae3_reset_type hclge_get_reset_level(struct hclge_dev *hdev,
						   unsigned long *addr)
{
	enum hnae3_reset_type rst_level = HNAE3_NONE_RESET;

	/* first, resolve any unknown reset type to the known type(s) */
	if (test_bit(HNAE3_UNKNOWN_RESET, addr)) {
		/* we will intentionally ignore any errors from this function
		 * as we will end up in *some* reset request in any case
		 */
		hclge_handle_hw_msix_error(hdev, addr);
		clear_bit(HNAE3_UNKNOWN_RESET, addr);
		/* We deferred the clearing of the error event which caused
		 * the interrupt since it was not possible to do that in
		 * interrupt context (and this is the reason we introduced the
		 * new UNKNOWN reset type). Now that the errors have been
		 * handled and cleared in hardware, we can safely enable
		 * interrupts. This is an exception to the norm.
		 */
		hclge_enable_vector(&hdev->misc_vector, true);
	}

	/* return the highest priority reset level amongst all */
	if (test_bit(HNAE3_IMP_RESET, addr)) {
		rst_level = HNAE3_IMP_RESET;
		clear_bit(HNAE3_IMP_RESET, addr);
		clear_bit(HNAE3_GLOBAL_RESET, addr);
		clear_bit(HNAE3_CORE_RESET, addr);
		clear_bit(HNAE3_FUNC_RESET, addr);
	} else if (test_bit(HNAE3_GLOBAL_RESET, addr)) {
		rst_level = HNAE3_GLOBAL_RESET;
		clear_bit(HNAE3_GLOBAL_RESET, addr);
		clear_bit(HNAE3_CORE_RESET, addr);
		clear_bit(HNAE3_FUNC_RESET, addr);
	} else if (test_bit(HNAE3_CORE_RESET, addr)) {
		rst_level = HNAE3_CORE_RESET;
		clear_bit(HNAE3_CORE_RESET, addr);
		clear_bit(HNAE3_FUNC_RESET, addr);
	} else if (test_bit(HNAE3_FUNC_RESET, addr)) {
		rst_level = HNAE3_FUNC_RESET;
		clear_bit(HNAE3_FUNC_RESET, addr);
	} else if (test_bit(HNAE3_FLR_RESET, addr)) {
		rst_level = HNAE3_FLR_RESET;
		clear_bit(HNAE3_FLR_RESET, addr);
	}

	return rst_level;
}
static void hclge_clear_reset_cause(struct hclge_dev *hdev)
{
	u32 clearval = 0;

	switch (hdev->reset_type) {
	case HNAE3_IMP_RESET:
		clearval = BIT(HCLGE_VECTOR0_IMPRESET_INT_B);
		break;
	case HNAE3_GLOBAL_RESET:
		clearval = BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B);
		break;
	case HNAE3_CORE_RESET:
		clearval = BIT(HCLGE_VECTOR0_CORERESET_INT_B);
		break;
	default:
		break;
	}

	if (!clearval)
		return;

	hclge_write_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG, clearval);
	hclge_enable_vector(&hdev->misc_vector, true);
}
static int hclge_reset_prepare_down(struct hclge_dev *hdev)
{
	int ret = 0;

	switch (hdev->reset_type) {
	case HNAE3_FUNC_RESET:
		/* fall through */
	case HNAE3_FLR_RESET:
		ret = hclge_set_all_vf_rst(hdev, true);
		break;
	default:
		break;
	}

	return ret;
}

static int hclge_reset_prepare_wait(struct hclge_dev *hdev)
{
	u32 reg_val;
	int ret = 0;

	switch (hdev->reset_type) {
	case HNAE3_FUNC_RESET:
		/* There is no mechanism for PF to know if VF has stopped IO
		 * for now, just wait 100 ms for VF to stop IO
		 */
		msleep(100);
		ret = hclge_func_reset_cmd(hdev, 0);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"asserting function reset fail %d!\n", ret);
			return ret;
		}

		/* After performing pf reset, it is not necessary to do the
		 * mailbox handling or send any command to firmware, because
		 * any mailbox handling or command to firmware is only valid
		 * after hclge_cmd_init is called.
		 */
		set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
		break;
	case HNAE3_FLR_RESET:
		/* There is no mechanism for PF to know if VF has stopped IO
		 * for now, just wait 100 ms for VF to stop IO
		 */
		msleep(100);
		set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
		set_bit(HNAE3_FLR_DOWN, &hdev->flr_state);
		break;
	case HNAE3_IMP_RESET:
		reg_val = hclge_read_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG);
		hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG,
				BIT(HCLGE_VECTOR0_IMP_RESET_INT_B) | reg_val);
		break;
	default:
		break;
	}

	dev_info(&hdev->pdev->dev, "prepare wait ok\n");

	return ret;
}
static bool hclge_reset_err_handle(struct hclge_dev *hdev, bool is_timeout)
{
#define MAX_RESET_FAIL_CNT 5
#define RESET_UPGRADE_DELAY_SEC 10

	if (hdev->reset_pending) {
		dev_info(&hdev->pdev->dev, "Reset pending %lu\n",
			 hdev->reset_pending);
		return true;
	} else if ((hdev->reset_type != HNAE3_IMP_RESET) &&
		   (hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG) &
		    BIT(HCLGE_IMP_RESET_BIT))) {
		dev_info(&hdev->pdev->dev,
			 "reset failed because IMP Reset is pending\n");
		hclge_clear_reset_cause(hdev);
		return false;
	} else if (hdev->reset_fail_cnt < MAX_RESET_FAIL_CNT) {
		hdev->reset_fail_cnt++;
		if (is_timeout) {
			set_bit(hdev->reset_type, &hdev->reset_pending);
			dev_info(&hdev->pdev->dev,
				 "re-schedule to wait for hw reset done\n");
			return true;
		}

		dev_info(&hdev->pdev->dev, "Upgrade reset level\n");
		hclge_clear_reset_cause(hdev);
		mod_timer(&hdev->reset_timer,
			  jiffies + RESET_UPGRADE_DELAY_SEC * HZ);

		return false;
	}

	hclge_clear_reset_cause(hdev);
	dev_err(&hdev->pdev->dev, "Reset fail!\n");
	return false;
}
static int hclge_reset_prepare_up(struct hclge_dev *hdev)
{
	int ret = 0;

	switch (hdev->reset_type) {
	case HNAE3_FUNC_RESET:
		/* fall through */
	case HNAE3_FLR_RESET:
		ret = hclge_set_all_vf_rst(hdev, false);
		break;
	default:
		break;
	}

	return ret;
}
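
/* hclge_reset() drives the full reset sequence: notify the RoCE and NIC
 * clients down, assert the reset and wait for hardware, then re-init the
 * ae device and bring the clients back up. Any failure (or a hardware
 * wait timeout) falls through to hclge_reset_err_handle(), which decides
 * whether the reset task should be re-scheduled.
 */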
static void hclge_reset(struct hclge_dev *hdev)
{
	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
	bool is_timeout = false;
	int ret;

	/* Initialize ae_dev reset status as well, in case enet layer wants to
	 * know if device is undergoing reset
	 */
	ae_dev->reset_type = hdev->reset_type;
	hdev->reset_count++;
	hdev->last_reset_time = jiffies;
	/* perform reset of the stack & ae device for a client */
	ret = hclge_notify_roce_client(hdev, HNAE3_DOWN_CLIENT);
	if (ret)
		goto err_reset;

	ret = hclge_reset_prepare_down(hdev);
	if (ret)
		goto err_reset;

	rtnl_lock();
	ret = hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
	if (ret)
		goto err_reset_lock;

	rtnl_unlock();

	ret = hclge_reset_prepare_wait(hdev);
	if (ret)
		goto err_reset;

	if (hclge_reset_wait(hdev)) {
		is_timeout = true;
		goto err_reset;
	}

	ret = hclge_notify_roce_client(hdev, HNAE3_UNINIT_CLIENT);
	if (ret)
		goto err_reset;

	rtnl_lock();
	ret = hclge_notify_client(hdev, HNAE3_UNINIT_CLIENT);
	if (ret)
		goto err_reset_lock;

	ret = hclge_reset_ae_dev(hdev->ae_dev);
	if (ret)
		goto err_reset_lock;

	ret = hclge_notify_client(hdev, HNAE3_INIT_CLIENT);
	if (ret)
		goto err_reset_lock;

	hclge_clear_reset_cause(hdev);

	ret = hclge_reset_prepare_up(hdev);
	if (ret)
		goto err_reset_lock;

	ret = hclge_notify_client(hdev, HNAE3_UP_CLIENT);
	if (ret)
		goto err_reset_lock;

	rtnl_unlock();

	ret = hclge_notify_roce_client(hdev, HNAE3_INIT_CLIENT);
	if (ret)
		goto err_reset;

	ret = hclge_notify_roce_client(hdev, HNAE3_UP_CLIENT);
	if (ret)
		goto err_reset;

	return;

err_reset_lock:
	rtnl_unlock();
err_reset:
	if (hclge_reset_err_handle(hdev, is_timeout))
		hclge_reset_task_schedule(hdev);
}
static void hclge_reset_event(struct pci_dev *pdev, struct hnae3_handle *handle)
{
	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);
	struct hclge_dev *hdev = ae_dev->priv;

	/* We might end up getting called broadly because of 2 below cases:
	 * 1. Recoverable error was conveyed through APEI and only way to bring
	 *    normalcy is to reset.
	 * 2. A new reset request from the stack due to timeout
	 *
	 * For the first case, the error event might not have an ae handle
	 * available. Check if this is a new reset request and we are not here
	 * just because the last reset attempt did not succeed and the
	 * watchdog hit us again. We will know this if the last reset request
	 * did not occur very recently (watchdog timer = 5*HZ, let us check
	 * after a sufficiently large time, say 4*5*HZ). In case of a new
	 * request we reset the "reset level" to PF reset. And if it is a
	 * repeat reset request of the most recent one then we want to make
	 * sure we throttle the reset request. Therefore, we will not allow
	 * it again before 3*HZ times.
	 */
	if (!handle)
		handle = &hdev->vport[0].nic;

	if (time_before(jiffies, (hdev->last_reset_time + 3 * HZ)))
		return;
	else if (hdev->default_reset_request)
		hdev->reset_level =
			hclge_get_reset_level(hdev,
					      &hdev->default_reset_request);
	else if (time_after(jiffies, (hdev->last_reset_time + 4 * 5 * HZ)))
		hdev->reset_level = HNAE3_FUNC_RESET;

	dev_info(&hdev->pdev->dev, "received reset event, reset type is %d",
		 hdev->reset_level);

	/* request reset & schedule reset task */
	set_bit(hdev->reset_level, &hdev->reset_request);
	hclge_reset_task_schedule(hdev);

	if (hdev->reset_level < HNAE3_GLOBAL_RESET)
		hdev->reset_level++;
}
static void hclge_set_def_reset_request(struct hnae3_ae_dev *ae_dev,
					enum hnae3_reset_type rst_type)
{
	struct hclge_dev *hdev = ae_dev->priv;

	set_bit(rst_type, &hdev->default_reset_request);
}

static void hclge_reset_timer(struct timer_list *t)
{
	struct hclge_dev *hdev = from_timer(hdev, t, reset_timer);

	dev_info(&hdev->pdev->dev,
		 "triggering global reset in reset timer\n");
	set_bit(HNAE3_GLOBAL_RESET, &hdev->default_reset_request);
	hclge_reset_event(hdev->pdev, NULL);
}
static void hclge_reset_subtask(struct hclge_dev *hdev)
{
	/* check if there is any ongoing reset in the hardware. This status can
	 * be checked from reset_pending. If there is then, we need to wait for
	 * hardware to complete reset.
	 *    a. If we are able to figure out in reasonable time that hardware
	 *       has fully completed the reset then, we can proceed with
	 *       driver, client reset.
	 *    b. else, we can come back later to check this status so re-sched
	 *       now.
	 */
	hdev->last_reset_time = jiffies;
	hdev->reset_type = hclge_get_reset_level(hdev, &hdev->reset_pending);
	if (hdev->reset_type != HNAE3_NONE_RESET)
		hclge_reset(hdev);

	/* check if we got any *new* reset requests to be honored */
	hdev->reset_type = hclge_get_reset_level(hdev, &hdev->reset_request);
	if (hdev->reset_type != HNAE3_NONE_RESET)
		hclge_do_reset(hdev);

	hdev->reset_type = HNAE3_NONE_RESET;
}
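
/* Each work item below pairs a *_SERVICE_SCHED bit (set by the scheduler,
 * cleared once the work actually runs) with a *_HANDLING bit that acts as
 * a re-entrancy guard: if the handler is already running, a new invocation
 * returns immediately and relies on the next schedule.
 */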
static void hclge_reset_service_task(struct work_struct *work)
{
	struct hclge_dev *hdev =
		container_of(work, struct hclge_dev, rst_service_task);

	if (test_and_set_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
		return;

	clear_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state);

	hclge_reset_subtask(hdev);

	clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
}

static void hclge_mailbox_service_task(struct work_struct *work)
{
	struct hclge_dev *hdev =
		container_of(work, struct hclge_dev, mbx_service_task);

	if (test_and_set_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state))
		return;

	clear_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state);

	hclge_mbx_handler(hdev);

	clear_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state);
}
static void hclge_update_vport_alive(struct hclge_dev *hdev)
{
	int i;

	/* start from vport 1 for PF is always alive */
	for (i = 1; i < hdev->num_alloc_vport; i++) {
		struct hclge_vport *vport = &hdev->vport[i];

		if (time_after(jiffies, vport->last_active_jiffies + 8 * HZ))
			clear_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);

		/* If vf is not alive, set to default value */
		if (!test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state))
			vport->mps = HCLGE_MAC_DEFAULT_FRAME;
	}
}

static void hclge_service_task(struct work_struct *work)
{
	struct hclge_dev *hdev =
		container_of(work, struct hclge_dev, service_task);

	if (hdev->hw_stats.stats_timer >= HCLGE_STATS_TIMER_INTERVAL) {
		hclge_update_stats_for_all(hdev);
		hdev->hw_stats.stats_timer = 0;
	}

	hclge_update_speed_duplex(hdev);
	hclge_update_link_status(hdev);
	hclge_update_vport_alive(hdev);
	hclge_service_complete(hdev);
}
struct hclge_vport *hclge_get_vport(struct hnae3_handle *handle)
{
	/* VF handle has no client */
	if (!handle->client)
		return container_of(handle, struct hclge_vport, nic);
	else if (handle->client->type == HNAE3_CLIENT_ROCE)
		return container_of(handle, struct hclge_vport, roce);
	else
		return container_of(handle, struct hclge_vport, nic);
}
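
/* MSI-X vector 0 is reserved for the misc interrupt, so the allocator
 * below hands out vectors starting from index 1. For each granted vector
 * it records the owning vport in vector_status[] and finally returns how
 * many of the requested vectors could actually be allocated.
 */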
static int hclge_get_vector(struct hnae3_handle *handle, u16 vector_num,
			    struct hnae3_vector_info *vector_info)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hnae3_vector_info *vector = vector_info;
	struct hclge_dev *hdev = vport->back;
	int alloc = 0;
	int i, j;

	vector_num = min(hdev->num_msi_left, vector_num);

	for (j = 0; j < vector_num; j++) {
		for (i = 1; i < hdev->num_msi; i++) {
			if (hdev->vector_status[i] == HCLGE_INVALID_VPORT) {
				vector->vector = pci_irq_vector(hdev->pdev, i);
				vector->io_addr = hdev->hw.io_base +
					HCLGE_VECTOR_REG_BASE +
					(i - 1) * HCLGE_VECTOR_REG_OFFSET +
					vport->vport_id *
					HCLGE_VECTOR_VF_OFFSET;
				hdev->vector_status[i] = vport->vport_id;
				hdev->vector_irq[i] = vector->vector;

				vector++;
				alloc++;

				break;
			}
		}
	}
	hdev->num_msi_left -= alloc;
	hdev->num_msi_used += alloc;

	return alloc;
}
static int hclge_get_vector_index(struct hclge_dev *hdev, int vector)
{
	int i;

	for (i = 0; i < hdev->num_msi; i++)
		if (vector == hdev->vector_irq[i])
			return i;

	return -EINVAL;
}

static int hclge_put_vector(struct hnae3_handle *handle, int vector)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	int vector_id;

	vector_id = hclge_get_vector_index(hdev, vector);
	if (vector_id < 0) {
		dev_err(&hdev->pdev->dev,
			"Get vector index fail. vector_id = %d\n", vector_id);
		return vector_id;
	}

	hclge_free_vector(hdev, vector_id);

	return 0;
}

static u32 hclge_get_rss_key_size(struct hnae3_handle *handle)
{
	return HCLGE_RSS_KEY_SIZE;
}

static u32 hclge_get_rss_indir_size(struct hnae3_handle *handle)
{
	return HCLGE_RSS_IND_TBL_SIZE;
}
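
/* The RSS hash key is longer than one command descriptor can carry, so
 * hclge_set_rss_algo_key() below splits it across three
 * HCLGE_OPC_RSS_GENERIC_CONFIG commands, HCLGE_RSS_HASH_KEY_NUM bytes at a
 * time, with the key_offset field telling firmware which chunk is being
 * written.
 */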
static int hclge_set_rss_algo_key(struct hclge_dev *hdev,
				  const u8 hfunc, const u8 *key)
{
	struct hclge_rss_config_cmd *req;
	struct hclge_desc desc;
	int key_offset;
	int key_size;
	int ret;

	req = (struct hclge_rss_config_cmd *)desc.data;

	for (key_offset = 0; key_offset < 3; key_offset++) {
		hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_GENERIC_CONFIG,
					   false);

		req->hash_config |= (hfunc & HCLGE_RSS_HASH_ALGO_MASK);
		req->hash_config |= (key_offset << HCLGE_RSS_HASH_KEY_OFFSET_B);

		if (key_offset == 2)
			key_size =
			HCLGE_RSS_KEY_SIZE - HCLGE_RSS_HASH_KEY_NUM * 2;
		else
			key_size = HCLGE_RSS_HASH_KEY_NUM;

		memcpy(req->hash_key,
		       key + key_offset * HCLGE_RSS_HASH_KEY_NUM, key_size);

		ret = hclge_cmd_send(&hdev->hw, &desc, 1);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"Configure RSS config fail, status = %d\n",
				ret);
			return ret;
		}
	}

	return 0;
}
static int hclge_set_rss_indir_table(struct hclge_dev *hdev, const u8 *indir)
{
	struct hclge_rss_indirection_table_cmd *req;
	struct hclge_desc desc;
	int i, j;
	int ret;

	req = (struct hclge_rss_indirection_table_cmd *)desc.data;

	for (i = 0; i < HCLGE_RSS_CFG_TBL_NUM; i++) {
		hclge_cmd_setup_basic_desc
			(&desc, HCLGE_OPC_RSS_INDIR_TABLE, false);

		req->start_table_index =
			cpu_to_le16(i * HCLGE_RSS_CFG_TBL_SIZE);
		req->rss_set_bitmap = cpu_to_le16(HCLGE_RSS_SET_BITMAP_MSK);

		for (j = 0; j < HCLGE_RSS_CFG_TBL_SIZE; j++)
			req->rss_result[j] =
				indir[i * HCLGE_RSS_CFG_TBL_SIZE + j];

		ret = hclge_cmd_send(&hdev->hw, &desc, 1);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"Configure rss indir table fail, status = %d\n",
				ret);
			return ret;
		}
	}

	return 0;
}
static int hclge_set_rss_tc_mode(struct hclge_dev *hdev, u16 *tc_valid,
				 u16 *tc_size, u16 *tc_offset)
{
	struct hclge_rss_tc_mode_cmd *req;
	struct hclge_desc desc;
	int ret;
	int i;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_TC_MODE, false);
	req = (struct hclge_rss_tc_mode_cmd *)desc.data;

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		u16 mode = 0;

		hnae3_set_bit(mode, HCLGE_RSS_TC_VALID_B, (tc_valid[i] & 0x1));
		hnae3_set_field(mode, HCLGE_RSS_TC_SIZE_M,
				HCLGE_RSS_TC_SIZE_S, tc_size[i]);
		hnae3_set_field(mode, HCLGE_RSS_TC_OFFSET_M,
				HCLGE_RSS_TC_OFFSET_S, tc_offset[i]);

		req->rss_tc_mode[i] = cpu_to_le16(mode);
	}

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"Configure rss tc mode fail, status = %d\n", ret);

	return ret;
}
static void hclge_get_rss_type(struct hclge_vport *vport)
{
	if (vport->rss_tuple_sets.ipv4_tcp_en ||
	    vport->rss_tuple_sets.ipv4_udp_en ||
	    vport->rss_tuple_sets.ipv4_sctp_en ||
	    vport->rss_tuple_sets.ipv6_tcp_en ||
	    vport->rss_tuple_sets.ipv6_udp_en ||
	    vport->rss_tuple_sets.ipv6_sctp_en)
		vport->nic.kinfo.rss_type = PKT_HASH_TYPE_L4;
	else if (vport->rss_tuple_sets.ipv4_fragment_en ||
		 vport->rss_tuple_sets.ipv6_fragment_en)
		vport->nic.kinfo.rss_type = PKT_HASH_TYPE_L3;
	else
		vport->nic.kinfo.rss_type = PKT_HASH_TYPE_NONE;
}
static int hclge_set_rss_input_tuple(struct hclge_dev *hdev)
{
	struct hclge_rss_input_tuple_cmd *req;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_INPUT_TUPLE, false);

	req = (struct hclge_rss_input_tuple_cmd *)desc.data;

	/* Get the tuple cfg from pf */
	req->ipv4_tcp_en = hdev->vport[0].rss_tuple_sets.ipv4_tcp_en;
	req->ipv4_udp_en = hdev->vport[0].rss_tuple_sets.ipv4_udp_en;
	req->ipv4_sctp_en = hdev->vport[0].rss_tuple_sets.ipv4_sctp_en;
	req->ipv4_fragment_en = hdev->vport[0].rss_tuple_sets.ipv4_fragment_en;
	req->ipv6_tcp_en = hdev->vport[0].rss_tuple_sets.ipv6_tcp_en;
	req->ipv6_udp_en = hdev->vport[0].rss_tuple_sets.ipv6_udp_en;
	req->ipv6_sctp_en = hdev->vport[0].rss_tuple_sets.ipv6_sctp_en;
	req->ipv6_fragment_en = hdev->vport[0].rss_tuple_sets.ipv6_fragment_en;
	hclge_get_rss_type(&hdev->vport[0]);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"Configure rss input fail, status = %d\n", ret);

	return ret;
}
static int hclge_get_rss(struct hnae3_handle *handle, u32 *indir,
			 u8 *key, u8 *hfunc)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	int i;

	/* Get hash algorithm */
	if (hfunc) {
		switch (vport->rss_algo) {
		case HCLGE_RSS_HASH_ALGO_TOEPLITZ:
			*hfunc = ETH_RSS_HASH_TOP;
			break;
		case HCLGE_RSS_HASH_ALGO_SIMPLE:
			*hfunc = ETH_RSS_HASH_XOR;
			break;
		default:
			*hfunc = ETH_RSS_HASH_UNKNOWN;
			break;
		}
	}

	/* Get the RSS Key required by the user */
	if (key)
		memcpy(key, vport->rss_hash_key, HCLGE_RSS_KEY_SIZE);

	/* Get indirect table */
	if (indir)
		for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
			indir[i] = vport->rss_indirection_tbl[i];

	return 0;
}
static int hclge_set_rss(struct hnae3_handle *handle, const u32 *indir,
			 const u8 *key, const u8 hfunc)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	u8 hash_algo;
	int ret, i;

	/* Set the RSS Hash Key if specified by the user */
	if (key) {
		switch (hfunc) {
		case ETH_RSS_HASH_TOP:
			hash_algo = HCLGE_RSS_HASH_ALGO_TOEPLITZ;
			break;
		case ETH_RSS_HASH_XOR:
			hash_algo = HCLGE_RSS_HASH_ALGO_SIMPLE;
			break;
		case ETH_RSS_HASH_NO_CHANGE:
			hash_algo = vport->rss_algo;
			break;
		default:
			return -EINVAL;
		}

		ret = hclge_set_rss_algo_key(hdev, hash_algo, key);
		if (ret)
			return ret;

		/* Update the shadow RSS key with the user specified key */
		memcpy(vport->rss_hash_key, key, HCLGE_RSS_KEY_SIZE);
		vport->rss_algo = hash_algo;
	}

	/* Update the shadow RSS table with user specified qids */
	for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
		vport->rss_indirection_tbl[i] = indir[i];

	/* Update the hardware */
	return hclge_set_rss_indir_table(hdev, vport->rss_indirection_tbl);
}
static u8 hclge_get_rss_hash_bits(struct ethtool_rxnfc *nfc)
{
	u8 hash_sets = nfc->data & RXH_L4_B_0_1 ? HCLGE_S_PORT_BIT : 0;

	if (nfc->data & RXH_L4_B_2_3)
		hash_sets |= HCLGE_D_PORT_BIT;
	else
		hash_sets &= ~HCLGE_D_PORT_BIT;

	if (nfc->data & RXH_IP_SRC)
		hash_sets |= HCLGE_S_IP_BIT;
	else
		hash_sets &= ~HCLGE_S_IP_BIT;

	if (nfc->data & RXH_IP_DST)
		hash_sets |= HCLGE_D_IP_BIT;
	else
		hash_sets &= ~HCLGE_D_IP_BIT;

	if (nfc->flow_type == SCTP_V4_FLOW || nfc->flow_type == SCTP_V6_FLOW)
		hash_sets |= HCLGE_V_TAG_BIT;

	return hash_sets;
}
static int hclge_set_rss_tuple(struct hnae3_handle *handle,
			       struct ethtool_rxnfc *nfc)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	struct hclge_rss_input_tuple_cmd *req;
	struct hclge_desc desc;
	u8 tuple_sets;
	int ret;

	if (nfc->data & ~(RXH_IP_SRC | RXH_IP_DST |
			  RXH_L4_B_0_1 | RXH_L4_B_2_3))
		return -EINVAL;

	req = (struct hclge_rss_input_tuple_cmd *)desc.data;
	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_INPUT_TUPLE, false);

	req->ipv4_tcp_en = vport->rss_tuple_sets.ipv4_tcp_en;
	req->ipv4_udp_en = vport->rss_tuple_sets.ipv4_udp_en;
	req->ipv4_sctp_en = vport->rss_tuple_sets.ipv4_sctp_en;
	req->ipv4_fragment_en = vport->rss_tuple_sets.ipv4_fragment_en;
	req->ipv6_tcp_en = vport->rss_tuple_sets.ipv6_tcp_en;
	req->ipv6_udp_en = vport->rss_tuple_sets.ipv6_udp_en;
	req->ipv6_sctp_en = vport->rss_tuple_sets.ipv6_sctp_en;
	req->ipv6_fragment_en = vport->rss_tuple_sets.ipv6_fragment_en;

	tuple_sets = hclge_get_rss_hash_bits(nfc);
	switch (nfc->flow_type) {
	case TCP_V4_FLOW:
		req->ipv4_tcp_en = tuple_sets;
		break;
	case TCP_V6_FLOW:
		req->ipv6_tcp_en = tuple_sets;
		break;
	case UDP_V4_FLOW:
		req->ipv4_udp_en = tuple_sets;
		break;
	case UDP_V6_FLOW:
		req->ipv6_udp_en = tuple_sets;
		break;
	case SCTP_V4_FLOW:
		req->ipv4_sctp_en = tuple_sets;
		break;
	case SCTP_V6_FLOW:
		if ((nfc->data & RXH_L4_B_0_1) ||
		    (nfc->data & RXH_L4_B_2_3))
			return -EINVAL;

		req->ipv6_sctp_en = tuple_sets;
		break;
	case IPV4_FLOW:
		req->ipv4_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
		break;
	case IPV6_FLOW:
		req->ipv6_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
		break;
	default:
		return -EINVAL;
	}

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Set rss tuple fail, status = %d\n", ret);
		return ret;
	}

	vport->rss_tuple_sets.ipv4_tcp_en = req->ipv4_tcp_en;
	vport->rss_tuple_sets.ipv4_udp_en = req->ipv4_udp_en;
	vport->rss_tuple_sets.ipv4_sctp_en = req->ipv4_sctp_en;
	vport->rss_tuple_sets.ipv4_fragment_en = req->ipv4_fragment_en;
	vport->rss_tuple_sets.ipv6_tcp_en = req->ipv6_tcp_en;
	vport->rss_tuple_sets.ipv6_udp_en = req->ipv6_udp_en;
	vport->rss_tuple_sets.ipv6_sctp_en = req->ipv6_sctp_en;
	vport->rss_tuple_sets.ipv6_fragment_en = req->ipv6_fragment_en;
	hclge_get_rss_type(vport);
	return 0;
}
static int hclge_get_rss_tuple(struct hnae3_handle *handle,
			       struct ethtool_rxnfc *nfc)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	u8 tuple_sets;

	nfc->data = 0;

	switch (nfc->flow_type) {
	case TCP_V4_FLOW:
		tuple_sets = vport->rss_tuple_sets.ipv4_tcp_en;
		break;
	case UDP_V4_FLOW:
		tuple_sets = vport->rss_tuple_sets.ipv4_udp_en;
		break;
	case TCP_V6_FLOW:
		tuple_sets = vport->rss_tuple_sets.ipv6_tcp_en;
		break;
	case UDP_V6_FLOW:
		tuple_sets = vport->rss_tuple_sets.ipv6_udp_en;
		break;
	case SCTP_V4_FLOW:
		tuple_sets = vport->rss_tuple_sets.ipv4_sctp_en;
		break;
	case SCTP_V6_FLOW:
		tuple_sets = vport->rss_tuple_sets.ipv6_sctp_en;
		break;
	case IPV4_FLOW:
	case IPV6_FLOW:
		tuple_sets = HCLGE_S_IP_BIT | HCLGE_D_IP_BIT;
		break;
	default:
		return -EINVAL;
	}

	if (!tuple_sets)
		return 0;

	if (tuple_sets & HCLGE_D_PORT_BIT)
		nfc->data |= RXH_L4_B_2_3;
	if (tuple_sets & HCLGE_S_PORT_BIT)
		nfc->data |= RXH_L4_B_0_1;
	if (tuple_sets & HCLGE_D_IP_BIT)
		nfc->data |= RXH_IP_DST;
	if (tuple_sets & HCLGE_S_IP_BIT)
		nfc->data |= RXH_IP_SRC;

	return 0;
}
static int hclge_get_tc_size(struct hnae3_handle *handle)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	return hdev->rss_size_max;
}
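
/* hclge_rss_init_hw() pushes the vport 0 shadow RSS state (indirection
 * table, hash key and input-tuple config) to hardware, then programs the
 * per-TC RSS mode. tc_size is the log2 of rss_size rounded up to a power
 * of two (e.g. rss_size = 24 gives roundup 32 and tc_size 5), and each
 * TC's queue window starts at rss_size * i.
 */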
int hclge_rss_init_hw(struct hclge_dev *hdev)
{
	struct hclge_vport *vport = hdev->vport;
	u8 *rss_indir = vport[0].rss_indirection_tbl;
	u16 rss_size = vport[0].alloc_rss_size;
	u8 *key = vport[0].rss_hash_key;
	u8 hfunc = vport[0].rss_algo;
	u16 tc_offset[HCLGE_MAX_TC_NUM];
	u16 tc_valid[HCLGE_MAX_TC_NUM];
	u16 tc_size[HCLGE_MAX_TC_NUM];
	u16 roundup_size;
	int i, ret;

	ret = hclge_set_rss_indir_table(hdev, rss_indir);
	if (ret)
		return ret;

	ret = hclge_set_rss_algo_key(hdev, hfunc, key);
	if (ret)
		return ret;

	ret = hclge_set_rss_input_tuple(hdev);
	if (ret)
		return ret;

	/* Each TC has the same queue size, and the tc_size set to hardware is
	 * the log2 of the roundup power of two of rss_size; the actual queue
	 * size is limited by the indirection table.
	 */
	if (rss_size > HCLGE_RSS_TC_SIZE_7 || rss_size == 0) {
		dev_err(&hdev->pdev->dev,
			"Configure rss tc size failed, invalid TC_SIZE = %d\n",
			rss_size);
		return -EINVAL;
	}

	roundup_size = roundup_pow_of_two(rss_size);
	roundup_size = ilog2(roundup_size);

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		tc_valid[i] = 0;

		if (!(hdev->hw_tc_map & BIT(i)))
			continue;

		tc_valid[i] = 1;
		tc_size[i] = roundup_size;
		tc_offset[i] = rss_size * i;
	}

	return hclge_set_rss_tc_mode(hdev, tc_valid, tc_size, tc_offset);
}
void hclge_rss_indir_init_cfg(struct hclge_dev *hdev)
{
	struct hclge_vport *vport = hdev->vport;
	int i, j;

	for (j = 0; j < hdev->num_vmdq_vport + 1; j++) {
		for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
			vport[j].rss_indirection_tbl[i] =
				i % vport[j].alloc_rss_size;
	}
}

static void hclge_rss_init_cfg(struct hclge_dev *hdev)
{
	struct hclge_vport *vport = hdev->vport;
	int i;

	for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
		vport[i].rss_tuple_sets.ipv4_tcp_en =
			HCLGE_RSS_INPUT_TUPLE_OTHER;
		vport[i].rss_tuple_sets.ipv4_udp_en =
			HCLGE_RSS_INPUT_TUPLE_OTHER;
		vport[i].rss_tuple_sets.ipv4_sctp_en =
			HCLGE_RSS_INPUT_TUPLE_SCTP;
		vport[i].rss_tuple_sets.ipv4_fragment_en =
			HCLGE_RSS_INPUT_TUPLE_OTHER;
		vport[i].rss_tuple_sets.ipv6_tcp_en =
			HCLGE_RSS_INPUT_TUPLE_OTHER;
		vport[i].rss_tuple_sets.ipv6_udp_en =
			HCLGE_RSS_INPUT_TUPLE_OTHER;
		vport[i].rss_tuple_sets.ipv6_sctp_en =
			HCLGE_RSS_INPUT_TUPLE_SCTP;
		vport[i].rss_tuple_sets.ipv6_fragment_en =
			HCLGE_RSS_INPUT_TUPLE_OTHER;

		vport[i].rss_algo = HCLGE_RSS_HASH_ALGO_TOEPLITZ;

		netdev_rss_key_fill(vport[i].rss_hash_key, HCLGE_RSS_KEY_SIZE);
	}

	hclge_rss_indir_init_cfg(hdev);
}
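
/* A ring chain can hold more rings than one descriptor can map, so
 * hclge_bind_ring_with_vector() below batches them: after every
 * HCLGE_VECTOR_ELEMENTS_PER_CMD entries it sends the descriptor and starts
 * a fresh one, then flushes any remainder after the loop. The same routine
 * serves both map (en = true) and unmap (en = false) via the opcode it
 * selects.
 */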
int hclge_bind_ring_with_vector(struct hclge_vport *vport,
				int vector_id, bool en,
				struct hnae3_ring_chain_node *ring_chain)
{
	struct hclge_dev *hdev = vport->back;
	struct hnae3_ring_chain_node *node;
	struct hclge_desc desc;
	struct hclge_ctrl_vector_chain_cmd *req
		= (struct hclge_ctrl_vector_chain_cmd *)desc.data;
	enum hclge_cmd_status status;
	enum hclge_opcode_type op;
	u16 tqp_type_and_id;
	int i = 0;

	op = en ? HCLGE_OPC_ADD_RING_TO_VECTOR : HCLGE_OPC_DEL_RING_TO_VECTOR;
	hclge_cmd_setup_basic_desc(&desc, op, false);
	req->int_vector_id = vector_id;

	for (node = ring_chain; node; node = node->next) {
		tqp_type_and_id = le16_to_cpu(req->tqp_type_and_id[i]);
		hnae3_set_field(tqp_type_and_id, HCLGE_INT_TYPE_M,
				HCLGE_INT_TYPE_S,
				hnae3_get_bit(node->flag, HNAE3_RING_TYPE_B));
		hnae3_set_field(tqp_type_and_id, HCLGE_TQP_ID_M,
				HCLGE_TQP_ID_S, node->tqp_index);
		hnae3_set_field(tqp_type_and_id, HCLGE_INT_GL_IDX_M,
				HCLGE_INT_GL_IDX_S,
				hnae3_get_field(node->int_gl_idx,
						HNAE3_RING_GL_IDX_M,
						HNAE3_RING_GL_IDX_S));
		req->tqp_type_and_id[i] = cpu_to_le16(tqp_type_and_id);
		if (++i >= HCLGE_VECTOR_ELEMENTS_PER_CMD) {
			req->int_cause_num = HCLGE_VECTOR_ELEMENTS_PER_CMD;
			req->vfid = vport->vport_id;

			status = hclge_cmd_send(&hdev->hw, &desc, 1);
			if (status) {
				dev_err(&hdev->pdev->dev,
					"Map TQP fail, status is %d.\n",
					status);
				return -EIO;
			}
			i = 0;

			hclge_cmd_setup_basic_desc(&desc,
						   op,
						   false);
			req->int_vector_id = vector_id;
		}
	}

	if (i > 0) {
		req->int_cause_num = i;
		req->vfid = vport->vport_id;
		status = hclge_cmd_send(&hdev->hw, &desc, 1);
		if (status) {
			dev_err(&hdev->pdev->dev,
				"Map TQP fail, status is %d.\n", status);
			return -EIO;
		}
	}

	return 0;
}
static int hclge_map_ring_to_vector(struct hnae3_handle *handle,
				    int vector,
				    struct hnae3_ring_chain_node *ring_chain)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	int vector_id;

	vector_id = hclge_get_vector_index(hdev, vector);
	if (vector_id < 0) {
		dev_err(&hdev->pdev->dev,
			"Get vector index fail. vector_id = %d\n", vector_id);
		return vector_id;
	}

	return hclge_bind_ring_with_vector(vport, vector_id, true, ring_chain);
}

static int hclge_unmap_ring_frm_vector(struct hnae3_handle *handle,
				       int vector,
				       struct hnae3_ring_chain_node *ring_chain)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	int vector_id, ret;

	if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
		return 0;

	vector_id = hclge_get_vector_index(hdev, vector);
	if (vector_id < 0) {
		dev_err(&handle->pdev->dev,
			"Get vector index fail. ret = %d\n", vector_id);
		return vector_id;
	}

	ret = hclge_bind_ring_with_vector(vport, vector_id, false, ring_chain);
	if (ret)
		dev_err(&handle->pdev->dev,
			"Unmap ring from vector fail. vectorid=%d, ret = %d\n",
			vector_id, ret);

	return ret;
}
int hclge_cmd_set_promisc_mode(struct hclge_dev *hdev,
			       struct hclge_promisc_param *param)
{
	struct hclge_promisc_cfg_cmd *req;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_PROMISC_MODE, false);

	req = (struct hclge_promisc_cfg_cmd *)desc.data;
	req->vf_id = param->vf_id;

	/* HCLGE_PROMISC_TX_EN_B and HCLGE_PROMISC_RX_EN_B are not supported
	 * on pdev revision(0x20); newer revisions support them. Setting these
	 * two fields does not cause an error when the driver sends the
	 * command to firmware in revision(0x20).
	 */
	req->flag = (param->enable << HCLGE_PROMISC_EN_B) |
		HCLGE_PROMISC_TX_EN_B | HCLGE_PROMISC_RX_EN_B;

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"Set promisc mode fail, status is %d.\n", ret);

	return ret;
}
void hclge_promisc_param_init(struct hclge_promisc_param *param, bool en_uc,
			      bool en_mc, bool en_bc, int vport_id)
{
	if (!param)
		return;

	memset(param, 0, sizeof(struct hclge_promisc_param));
	if (en_uc)
		param->enable = HCLGE_PROMISC_EN_UC;
	if (en_mc)
		param->enable |= HCLGE_PROMISC_EN_MC;
	if (en_bc)
		param->enable |= HCLGE_PROMISC_EN_BC;
	param->vf_id = vport_id;
}

static int hclge_set_promisc_mode(struct hnae3_handle *handle, bool en_uc_pmc,
				  bool en_mc_pmc)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	struct hclge_promisc_param param;

	hclge_promisc_param_init(&param, en_uc_pmc, en_mc_pmc, true,
				 vport->vport_id);
	return hclge_cmd_set_promisc_mode(hdev, &param);
}
static int hclge_get_fd_mode(struct hclge_dev *hdev, u8 *fd_mode)
{
	struct hclge_get_fd_mode_cmd *req;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_MODE_CTRL, true);

	req = (struct hclge_get_fd_mode_cmd *)desc.data;

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev, "get fd mode fail, ret=%d\n", ret);
		return ret;
	}

	*fd_mode = req->mode;

	return ret;
}

static int hclge_get_fd_allocation(struct hclge_dev *hdev,
				   u32 *stage1_entry_num,
				   u32 *stage2_entry_num,
				   u16 *stage1_counter_num,
				   u16 *stage2_counter_num)
{
	struct hclge_get_fd_allocation_cmd *req;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_GET_ALLOCATION, true);

	req = (struct hclge_get_fd_allocation_cmd *)desc.data;

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev, "query fd allocation fail, ret=%d\n",
			ret);
		return ret;
	}

	*stage1_entry_num = le32_to_cpu(req->stage1_entry_num);
	*stage2_entry_num = le32_to_cpu(req->stage2_entry_num);
	*stage1_counter_num = le16_to_cpu(req->stage1_counter_num);
	*stage2_counter_num = le16_to_cpu(req->stage2_counter_num);

	return ret;
}
static int hclge_set_fd_key_config(struct hclge_dev *hdev, int stage_num)
{
	struct hclge_set_fd_key_config_cmd *req;
	struct hclge_fd_key_cfg *stage;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_KEY_CONFIG, false);

	req = (struct hclge_set_fd_key_config_cmd *)desc.data;
	stage = &hdev->fd_cfg.key_cfg[stage_num];
	req->stage = stage_num;
	req->key_select = stage->key_sel;
	req->inner_sipv6_word_en = stage->inner_sipv6_word_en;
	req->inner_dipv6_word_en = stage->inner_dipv6_word_en;
	req->outer_sipv6_word_en = stage->outer_sipv6_word_en;
	req->outer_dipv6_word_en = stage->outer_dipv6_word_en;
	req->tuple_mask = cpu_to_le32(~stage->tuple_active);
	req->meta_data_mask = cpu_to_le32(~stage->meta_data_active);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev, "set fd key fail, ret=%d\n", ret);

	return ret;
}
*hdev
)
3848 #define LOW_2_WORDS 0x03
3849 struct hclge_fd_key_cfg
*key_cfg
;
3852 if (!hnae3_dev_fd_supported(hdev
))
3855 ret
= hclge_get_fd_mode(hdev
, &hdev
->fd_cfg
.fd_mode
);
3859 switch (hdev
->fd_cfg
.fd_mode
) {
3860 case HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1
:
3861 hdev
->fd_cfg
.max_key_length
= MAX_KEY_LENGTH
;
3863 case HCLGE_FD_MODE_DEPTH_4K_WIDTH_200B_STAGE_1
:
3864 hdev
->fd_cfg
.max_key_length
= MAX_KEY_LENGTH
/ 2;
3867 dev_err(&hdev
->pdev
->dev
,
3868 "Unsupported flow director mode %d\n",
3869 hdev
->fd_cfg
.fd_mode
);
3873 hdev
->fd_cfg
.fd_en
= true;
3874 hdev
->fd_cfg
.proto_support
=
3875 TCP_V4_FLOW
| UDP_V4_FLOW
| SCTP_V4_FLOW
| TCP_V6_FLOW
|
3876 UDP_V6_FLOW
| SCTP_V6_FLOW
| IPV4_USER_FLOW
| IPV6_USER_FLOW
;
3877 key_cfg
= &hdev
->fd_cfg
.key_cfg
[HCLGE_FD_STAGE_1
];
3878 key_cfg
->key_sel
= HCLGE_FD_KEY_BASE_ON_TUPLE
,
3879 key_cfg
->inner_sipv6_word_en
= LOW_2_WORDS
;
3880 key_cfg
->inner_dipv6_word_en
= LOW_2_WORDS
;
3881 key_cfg
->outer_sipv6_word_en
= 0;
3882 key_cfg
->outer_dipv6_word_en
= 0;
3884 key_cfg
->tuple_active
= BIT(INNER_VLAN_TAG_FST
) | BIT(INNER_ETH_TYPE
) |
3885 BIT(INNER_IP_PROTO
) | BIT(INNER_IP_TOS
) |
3886 BIT(INNER_SRC_IP
) | BIT(INNER_DST_IP
) |
3887 BIT(INNER_SRC_PORT
) | BIT(INNER_DST_PORT
);
3889 /* If use max 400bit key, we can support tuples for ether type */
3890 if (hdev
->fd_cfg
.max_key_length
== MAX_KEY_LENGTH
) {
3891 hdev
->fd_cfg
.proto_support
|= ETHER_FLOW
;
3892 key_cfg
->tuple_active
|=
3893 BIT(INNER_DST_MAC
) | BIT(INNER_SRC_MAC
);
3896 /* roce_type is used to filter roce frames
3897 * dst_vport is used to specify the rule
3899 key_cfg
->meta_data_active
= BIT(ROCE_TYPE
) | BIT(DST_VPORT
);
3901 ret
= hclge_get_fd_allocation(hdev
,
3902 &hdev
->fd_cfg
.rule_num
[HCLGE_FD_STAGE_1
],
3903 &hdev
->fd_cfg
.rule_num
[HCLGE_FD_STAGE_2
],
3904 &hdev
->fd_cfg
.cnt_num
[HCLGE_FD_STAGE_1
],
3905 &hdev
->fd_cfg
.cnt_num
[HCLGE_FD_STAGE_2
]);
3909 return hclge_set_fd_key_config(hdev
, HCLGE_FD_STAGE_1
);
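
/* One TCAM entry spans three command descriptors, so the helper below
 * chains them with HCLGE_CMD_FLAG_NEXT and scatters the key bytes across
 * the three tcam_data buffers. It is called twice per rule by
 * hclge_config_key(): once with sel_x false for the Y half of the key and
 * once with sel_x true for the X half.
 */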
static int hclge_fd_tcam_config(struct hclge_dev *hdev, u8 stage, bool sel_x,
				int loc, u8 *key, bool is_add)
{
	struct hclge_fd_tcam_config_1_cmd *req1;
	struct hclge_fd_tcam_config_2_cmd *req2;
	struct hclge_fd_tcam_config_3_cmd *req3;
	struct hclge_desc desc[3];
	int ret;

	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_FD_TCAM_OP, false);
	desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
	hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_FD_TCAM_OP, false);
	desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
	hclge_cmd_setup_basic_desc(&desc[2], HCLGE_OPC_FD_TCAM_OP, false);

	req1 = (struct hclge_fd_tcam_config_1_cmd *)desc[0].data;
	req2 = (struct hclge_fd_tcam_config_2_cmd *)desc[1].data;
	req3 = (struct hclge_fd_tcam_config_3_cmd *)desc[2].data;

	req1->stage = stage;
	req1->xy_sel = sel_x ? 1 : 0;
	hnae3_set_bit(req1->port_info, HCLGE_FD_EPORT_SW_EN_B, 0);
	req1->index = cpu_to_le32(loc);
	req1->entry_vld = sel_x ? is_add : 0;

	if (key) {
		memcpy(req1->tcam_data, &key[0], sizeof(req1->tcam_data));
		memcpy(req2->tcam_data, &key[sizeof(req1->tcam_data)],
		       sizeof(req2->tcam_data));
		memcpy(req3->tcam_data, &key[sizeof(req1->tcam_data) +
		       sizeof(req2->tcam_data)], sizeof(req3->tcam_data));
	}

	ret = hclge_cmd_send(&hdev->hw, desc, 3);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"config tcam key fail, ret=%d\n",
			ret);

	return ret;
}
static int hclge_fd_ad_config(struct hclge_dev *hdev, u8 stage, int loc,
			      struct hclge_fd_ad_data *action)
{
	struct hclge_fd_ad_config_cmd *req;
	struct hclge_desc desc;
	u64 ad_data = 0;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_AD_OP, false);

	req = (struct hclge_fd_ad_config_cmd *)desc.data;
	req->index = cpu_to_le32(loc);
	req->stage = stage;

	hnae3_set_bit(ad_data, HCLGE_FD_AD_WR_RULE_ID_B,
		      action->write_rule_id_to_bd);
	hnae3_set_field(ad_data, HCLGE_FD_AD_RULE_ID_M, HCLGE_FD_AD_RULE_ID_S,
			action->rule_id);
	ad_data <<= 32;
	hnae3_set_bit(ad_data, HCLGE_FD_AD_DROP_B, action->drop_packet);
	hnae3_set_bit(ad_data, HCLGE_FD_AD_DIRECT_QID_B,
		      action->forward_to_direct_queue);
	hnae3_set_field(ad_data, HCLGE_FD_AD_QID_M, HCLGE_FD_AD_QID_S,
			action->queue_id);
	hnae3_set_bit(ad_data, HCLGE_FD_AD_USE_COUNTER_B, action->use_counter);
	hnae3_set_field(ad_data, HCLGE_FD_AD_COUNTER_NUM_M,
			HCLGE_FD_AD_COUNTER_NUM_S, action->counter_id);
	hnae3_set_bit(ad_data, HCLGE_FD_AD_NXT_STEP_B, action->use_next_stage);
	hnae3_set_field(ad_data, HCLGE_FD_AD_NXT_KEY_M, HCLGE_FD_AD_NXT_KEY_S,
			action->counter_id);

	req->ad_data = cpu_to_le64(ad_data);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev, "fd ad config fail, ret=%d\n", ret);

	return ret;
}
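
/* Flow director keys are programmed as a TCAM X/Y bit pair derived from
 * each tuple's value and mask by the calc_x()/calc_y() macros; bits the
 * mask leaves out match any packet value. hclge_fd_convert_tuple()
 * returns true whenever the tuple occupies space in the key layout (even
 * if this rule leaves it unused), so the caller advances its key cursor
 * only for tuples that are actually laid out.
 */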
static bool hclge_fd_convert_tuple(u32 tuple_bit, u8 *key_x, u8 *key_y,
				   struct hclge_fd_rule *rule)
{
	u16 tmp_x_s, tmp_y_s;
	u32 tmp_x_l, tmp_y_l;
	int i;

	if (rule->unused_tuple & tuple_bit)
		return true;

	switch (tuple_bit) {
	case 0:
		return false;
	case BIT(INNER_DST_MAC):
		for (i = 0; i < 6; i++) {
			calc_x(key_x[5 - i], rule->tuples.dst_mac[i],
			       rule->tuples_mask.dst_mac[i]);
			calc_y(key_y[5 - i], rule->tuples.dst_mac[i],
			       rule->tuples_mask.dst_mac[i]);
		}

		return true;
	case BIT(INNER_SRC_MAC):
		for (i = 0; i < 6; i++) {
			calc_x(key_x[5 - i], rule->tuples.src_mac[i],
			       rule->tuples.src_mac[i]);
			calc_y(key_y[5 - i], rule->tuples.src_mac[i],
			       rule->tuples.src_mac[i]);
		}

		return true;
	case BIT(INNER_VLAN_TAG_FST):
		calc_x(tmp_x_s, rule->tuples.vlan_tag1,
		       rule->tuples_mask.vlan_tag1);
		calc_y(tmp_y_s, rule->tuples.vlan_tag1,
		       rule->tuples_mask.vlan_tag1);
		*(__le16 *)key_x = cpu_to_le16(tmp_x_s);
		*(__le16 *)key_y = cpu_to_le16(tmp_y_s);

		return true;
	case BIT(INNER_ETH_TYPE):
		calc_x(tmp_x_s, rule->tuples.ether_proto,
		       rule->tuples_mask.ether_proto);
		calc_y(tmp_y_s, rule->tuples.ether_proto,
		       rule->tuples_mask.ether_proto);
		*(__le16 *)key_x = cpu_to_le16(tmp_x_s);
		*(__le16 *)key_y = cpu_to_le16(tmp_y_s);

		return true;
	case BIT(INNER_IP_TOS):
		calc_x(*key_x, rule->tuples.ip_tos, rule->tuples_mask.ip_tos);
		calc_y(*key_y, rule->tuples.ip_tos, rule->tuples_mask.ip_tos);

		return true;
	case BIT(INNER_IP_PROTO):
		calc_x(*key_x, rule->tuples.ip_proto,
		       rule->tuples_mask.ip_proto);
		calc_y(*key_y, rule->tuples.ip_proto,
		       rule->tuples_mask.ip_proto);

		return true;
	case BIT(INNER_SRC_IP):
		calc_x(tmp_x_l, rule->tuples.src_ip[3],
		       rule->tuples_mask.src_ip[3]);
		calc_y(tmp_y_l, rule->tuples.src_ip[3],
		       rule->tuples_mask.src_ip[3]);
		*(__le32 *)key_x = cpu_to_le32(tmp_x_l);
		*(__le32 *)key_y = cpu_to_le32(tmp_y_l);

		return true;
	case BIT(INNER_DST_IP):
		calc_x(tmp_x_l, rule->tuples.dst_ip[3],
		       rule->tuples_mask.dst_ip[3]);
		calc_y(tmp_y_l, rule->tuples.dst_ip[3],
		       rule->tuples_mask.dst_ip[3]);
		*(__le32 *)key_x = cpu_to_le32(tmp_x_l);
		*(__le32 *)key_y = cpu_to_le32(tmp_y_l);

		return true;
	case BIT(INNER_SRC_PORT):
		calc_x(tmp_x_s, rule->tuples.src_port,
		       rule->tuples_mask.src_port);
		calc_y(tmp_y_s, rule->tuples.src_port,
		       rule->tuples_mask.src_port);
		*(__le16 *)key_x = cpu_to_le16(tmp_x_s);
		*(__le16 *)key_y = cpu_to_le16(tmp_y_s);

		return true;
	case BIT(INNER_DST_PORT):
		calc_x(tmp_x_s, rule->tuples.dst_port,
		       rule->tuples_mask.dst_port);
		calc_y(tmp_y_s, rule->tuples.dst_port,
		       rule->tuples_mask.dst_port);
		*(__le16 *)key_x = cpu_to_le16(tmp_x_s);
		*(__le16 *)key_y = cpu_to_le16(tmp_y_s);

		return true;
	default:
		return false;
	}
}
static u32 hclge_get_port_number(enum HLCGE_PORT_TYPE port_type, u8 pf_id,
				 u8 vf_id, u8 network_port_id)
{
	u32 port_number = 0;

	if (port_type == HOST_PORT) {
		hnae3_set_field(port_number, HCLGE_PF_ID_M, HCLGE_PF_ID_S,
				pf_id);
		hnae3_set_field(port_number, HCLGE_VF_ID_M, HCLGE_VF_ID_S,
				vf_id);
		hnae3_set_bit(port_number, HCLGE_PORT_TYPE_B, HOST_PORT);
	} else {
		hnae3_set_field(port_number, HCLGE_NETWORK_PORT_ID_M,
				HCLGE_NETWORK_PORT_ID_S, network_port_id);
		hnae3_set_bit(port_number, HCLGE_PORT_TYPE_B, NETWORK_PORT);
	}

	return port_number;
}

static void hclge_fd_convert_meta_data(struct hclge_fd_key_cfg *key_cfg,
				       __le32 *key_x, __le32 *key_y,
				       struct hclge_fd_rule *rule)
{
	u32 tuple_bit, meta_data = 0, tmp_x, tmp_y, port_number;
	u8 cur_pos = 0, tuple_size, shift_bits;
	int i;

	for (i = 0; i < MAX_META_DATA; i++) {
		tuple_size = meta_data_key_info[i].key_length;
		tuple_bit = key_cfg->meta_data_active & BIT(i);

		switch (tuple_bit) {
		case BIT(ROCE_TYPE):
			hnae3_set_bit(meta_data, cur_pos, NIC_PACKET);
			cur_pos += tuple_size;
			break;
		case BIT(DST_VPORT):
			port_number = hclge_get_port_number(HOST_PORT, 0,
							    rule->vf_id, 0);
			hnae3_set_field(meta_data,
					GENMASK(cur_pos + tuple_size, cur_pos),
					cur_pos, port_number);
			cur_pos += tuple_size;
			break;
		default:
			break;
		}
	}

	calc_x(tmp_x, meta_data, 0xFFFFFFFF);
	calc_y(tmp_y, meta_data, 0xFFFFFFFF);
	shift_bits = sizeof(meta_data) * 8 - cur_pos;

	*key_x = cpu_to_le32(tmp_x << shift_bits);
	*key_y = cpu_to_le32(tmp_y << shift_bits);
}
/* A complete key is combined with meta data key and tuple key.
 * Meta data key is stored at the MSB region, and tuple key is stored at
 * the LSB region; unused bits will be filled with 0.
 */
static int hclge_config_key(struct hclge_dev *hdev, u8 stage,
			    struct hclge_fd_rule *rule)
{
	struct hclge_fd_key_cfg *key_cfg = &hdev->fd_cfg.key_cfg[stage];
	u8 key_x[MAX_KEY_BYTES], key_y[MAX_KEY_BYTES];
	u8 *cur_key_x, *cur_key_y;
	int i, ret, tuple_size;
	u8 meta_data_region;

	memset(key_x, 0, sizeof(key_x));
	memset(key_y, 0, sizeof(key_y));
	cur_key_x = key_x;
	cur_key_y = key_y;

	for (i = 0; i < MAX_TUPLE; i++) {
		bool tuple_valid;
		u32 check_tuple;

		tuple_size = tuple_key_info[i].key_length / 8;
		check_tuple = key_cfg->tuple_active & BIT(i);

		tuple_valid = hclge_fd_convert_tuple(check_tuple, cur_key_x,
						     cur_key_y, rule);
		if (tuple_valid) {
			cur_key_x += tuple_size;
			cur_key_y += tuple_size;
		}
	}

	meta_data_region = hdev->fd_cfg.max_key_length / 8 -
			MAX_META_DATA_LENGTH / 8;

	hclge_fd_convert_meta_data(key_cfg,
				   (__le32 *)(key_x + meta_data_region),
				   (__le32 *)(key_y + meta_data_region),
				   rule);

	ret = hclge_fd_tcam_config(hdev, stage, false, rule->location, key_y,
				   true);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"fd key_y config fail, loc=%d, ret=%d\n",
			rule->location, ret);
		return ret;
	}

	ret = hclge_fd_tcam_config(hdev, stage, true, rule->location, key_x,
				   true);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"fd key_x config fail, loc=%d, ret=%d\n",
			rule->location, ret);

	return ret;
}
static int hclge_config_action(struct hclge_dev *hdev, u8 stage,
			       struct hclge_fd_rule *rule)
{
	struct hclge_fd_ad_data ad_data;

	ad_data.ad_id = rule->location;

	if (rule->action == HCLGE_FD_ACTION_DROP_PACKET) {
		ad_data.drop_packet = true;
		ad_data.forward_to_direct_queue = false;
		ad_data.queue_id = 0;
	} else {
		ad_data.drop_packet = false;
		ad_data.forward_to_direct_queue = true;
		ad_data.queue_id = rule->queue_id;
	}

	ad_data.use_counter = false;
	ad_data.counter_id = 0;

	ad_data.use_next_stage = false;
	ad_data.next_input_key = 0;

	ad_data.write_rule_id_to_bd = true;
	ad_data.rule_id = rule->location;

	return hclge_fd_ad_config(hdev, stage, ad_data.ad_id, &ad_data);
}
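
/* hclge_fd_check_spec() validates an ethtool flow spec against what the
 * flow director supports and, as a side effect, builds the "unused" tuple
 * bitmap: a bit is set for every tuple the rule does not constrain (zero
 * in the spec), so the later key conversion can skip it. Knobs the
 * hardware cannot match on (user-def bytes, vlan_etype, tclass,
 * l4_4_bytes) are rejected.
 */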
static int hclge_fd_check_spec(struct hclge_dev *hdev,
			       struct ethtool_rx_flow_spec *fs, u32 *unused)
{
	struct ethtool_tcpip4_spec *tcp_ip4_spec;
	struct ethtool_usrip4_spec *usr_ip4_spec;
	struct ethtool_tcpip6_spec *tcp_ip6_spec;
	struct ethtool_usrip6_spec *usr_ip6_spec;
	struct ethhdr *ether_spec;

	if (fs->location >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1])
		return -EINVAL;

	if (!(fs->flow_type & hdev->fd_cfg.proto_support))
		return -EOPNOTSUPP;

	if ((fs->flow_type & FLOW_EXT) &&
	    (fs->h_ext.data[0] != 0 || fs->h_ext.data[1] != 0)) {
		dev_err(&hdev->pdev->dev, "user-def bytes are not supported\n");
		return -EOPNOTSUPP;
	}

	switch (fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) {
	case SCTP_V4_FLOW:
	case TCP_V4_FLOW:
	case UDP_V4_FLOW:
		tcp_ip4_spec = &fs->h_u.tcp_ip4_spec;
		*unused |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC);

		if (!tcp_ip4_spec->ip4src)
			*unused |= BIT(INNER_SRC_IP);

		if (!tcp_ip4_spec->ip4dst)
			*unused |= BIT(INNER_DST_IP);

		if (!tcp_ip4_spec->psrc)
			*unused |= BIT(INNER_SRC_PORT);

		if (!tcp_ip4_spec->pdst)
			*unused |= BIT(INNER_DST_PORT);

		if (!tcp_ip4_spec->tos)
			*unused |= BIT(INNER_IP_TOS);

		break;
	case IP_USER_FLOW:
		usr_ip4_spec = &fs->h_u.usr_ip4_spec;
		*unused |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
			BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT);

		if (!usr_ip4_spec->ip4src)
			*unused |= BIT(INNER_SRC_IP);

		if (!usr_ip4_spec->ip4dst)
			*unused |= BIT(INNER_DST_IP);

		if (!usr_ip4_spec->tos)
			*unused |= BIT(INNER_IP_TOS);

		if (!usr_ip4_spec->proto)
			*unused |= BIT(INNER_IP_PROTO);

		if (usr_ip4_spec->l4_4_bytes)
			return -EOPNOTSUPP;

		if (usr_ip4_spec->ip_ver != ETH_RX_NFC_IP4)
			return -EOPNOTSUPP;

		break;
	case SCTP_V6_FLOW:
	case TCP_V6_FLOW:
	case UDP_V6_FLOW:
		tcp_ip6_spec = &fs->h_u.tcp_ip6_spec;
		*unused |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
			BIT(INNER_IP_TOS);

		if (!tcp_ip6_spec->ip6src[0] && !tcp_ip6_spec->ip6src[1] &&
		    !tcp_ip6_spec->ip6src[2] && !tcp_ip6_spec->ip6src[3])
			*unused |= BIT(INNER_SRC_IP);

		if (!tcp_ip6_spec->ip6dst[0] && !tcp_ip6_spec->ip6dst[1] &&
		    !tcp_ip6_spec->ip6dst[2] && !tcp_ip6_spec->ip6dst[3])
			*unused |= BIT(INNER_DST_IP);

		if (!tcp_ip6_spec->psrc)
			*unused |= BIT(INNER_SRC_PORT);

		if (!tcp_ip6_spec->pdst)
			*unused |= BIT(INNER_DST_PORT);

		if (tcp_ip6_spec->tclass)
			return -EOPNOTSUPP;

		break;
	case IPV6_USER_FLOW:
		usr_ip6_spec = &fs->h_u.usr_ip6_spec;
		*unused |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
			BIT(INNER_IP_TOS) | BIT(INNER_SRC_PORT) |
			BIT(INNER_DST_PORT);

		if (!usr_ip6_spec->ip6src[0] && !usr_ip6_spec->ip6src[1] &&
		    !usr_ip6_spec->ip6src[2] && !usr_ip6_spec->ip6src[3])
			*unused |= BIT(INNER_SRC_IP);

		if (!usr_ip6_spec->ip6dst[0] && !usr_ip6_spec->ip6dst[1] &&
		    !usr_ip6_spec->ip6dst[2] && !usr_ip6_spec->ip6dst[3])
			*unused |= BIT(INNER_DST_IP);

		if (!usr_ip6_spec->l4_proto)
			*unused |= BIT(INNER_IP_PROTO);

		if (usr_ip6_spec->tclass)
			return -EOPNOTSUPP;

		if (usr_ip6_spec->l4_4_bytes)
			return -EOPNOTSUPP;

		break;
	case ETHER_FLOW:
		ether_spec = &fs->h_u.ether_spec;
		*unused |= BIT(INNER_SRC_IP) | BIT(INNER_DST_IP) |
			BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT) |
			BIT(INNER_IP_TOS) | BIT(INNER_IP_PROTO);

		if (is_zero_ether_addr(ether_spec->h_source))
			*unused |= BIT(INNER_SRC_MAC);

		if (is_zero_ether_addr(ether_spec->h_dest))
			*unused |= BIT(INNER_DST_MAC);

		if (!ether_spec->h_proto)
			*unused |= BIT(INNER_ETH_TYPE);

		break;
	default:
		return -EOPNOTSUPP;
	}

	if ((fs->flow_type & FLOW_EXT)) {
		if (fs->h_ext.vlan_etype)
			return -EOPNOTSUPP;
		if (!fs->h_ext.vlan_tci)
			*unused |= BIT(INNER_VLAN_TAG_FST);

		if (fs->m_ext.vlan_tci) {
			if (be16_to_cpu(fs->h_ext.vlan_tci) >= VLAN_N_VID)
				return -EINVAL;
		}
	} else {
		*unused |= BIT(INNER_VLAN_TAG_FST);
	}

	if (fs->flow_type & FLOW_MAC_EXT) {
		if (!(hdev->fd_cfg.proto_support & ETHER_FLOW))
			return -EOPNOTSUPP;

		if (is_zero_ether_addr(fs->h_ext.h_dest))
			*unused |= BIT(INNER_DST_MAC);
		else
			*unused &= ~(BIT(INNER_DST_MAC));
	}

	return 0;
}
static bool hclge_fd_rule_exist(struct hclge_dev *hdev, u16 location)
{
	struct hclge_fd_rule *rule = NULL;
	struct hlist_node *node2;

	hlist_for_each_entry_safe(rule, node2, &hdev->fd_rule_list, rule_node) {
		if (rule->location >= location)
			break;
	}

	return rule && rule->location == location;
}
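/* hdev->fd_rule_list is kept sorted by rule->location; the walkers here
 * stop at the first node whose location is >= the one they want, which
 * makes existence checks and ordered insertion simple single passes.
 */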
static int hclge_fd_update_rule_list(struct hclge_dev *hdev,
				     struct hclge_fd_rule *new_rule,
				     u16 location,
				     bool is_add)
{
	struct hclge_fd_rule *rule = NULL, *parent = NULL;
	struct hlist_node *node2;

	if (is_add && !new_rule)
		return -EINVAL;

	hlist_for_each_entry_safe(rule, node2,
				  &hdev->fd_rule_list, rule_node) {
		if (rule->location >= location)
			break;
		parent = rule;
	}

	if (rule && rule->location == location) {
		hlist_del(&rule->rule_node);
		kfree(rule);
		hdev->hclge_fd_rule_num--;

		if (!is_add)
			return 0;

	} else if (!is_add) {
		dev_err(&hdev->pdev->dev,
			"delete fail, rule %d is inexistent\n",
			location);
		return -EINVAL;
	}

	INIT_HLIST_NODE(&new_rule->rule_node);

	if (parent)
		hlist_add_behind(&new_rule->rule_node, &parent->rule_node);
	else
		hlist_add_head(&new_rule->rule_node, &hdev->fd_rule_list);

	hdev->hclge_fd_rule_num++;

	return 0;
}
static int hclge_fd_get_tuple(struct hclge_dev *hdev,
			      struct ethtool_rx_flow_spec *fs,
			      struct hclge_fd_rule *rule)
{
	u32 flow_type = fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT);

	switch (flow_type) {
	case SCTP_V4_FLOW:
	case TCP_V4_FLOW:
	case UDP_V4_FLOW:
		rule->tuples.src_ip[3] =
				be32_to_cpu(fs->h_u.tcp_ip4_spec.ip4src);
		rule->tuples_mask.src_ip[3] =
				be32_to_cpu(fs->m_u.tcp_ip4_spec.ip4src);

		rule->tuples.dst_ip[3] =
				be32_to_cpu(fs->h_u.tcp_ip4_spec.ip4dst);
		rule->tuples_mask.dst_ip[3] =
				be32_to_cpu(fs->m_u.tcp_ip4_spec.ip4dst);

		rule->tuples.src_port = be16_to_cpu(fs->h_u.tcp_ip4_spec.psrc);
		rule->tuples_mask.src_port =
				be16_to_cpu(fs->m_u.tcp_ip4_spec.psrc);

		rule->tuples.dst_port = be16_to_cpu(fs->h_u.tcp_ip4_spec.pdst);
		rule->tuples_mask.dst_port =
				be16_to_cpu(fs->m_u.tcp_ip4_spec.pdst);

		rule->tuples.ip_tos = fs->h_u.tcp_ip4_spec.tos;
		rule->tuples_mask.ip_tos = fs->m_u.tcp_ip4_spec.tos;

		rule->tuples.ether_proto = ETH_P_IP;
		rule->tuples_mask.ether_proto = 0xFFFF;

		break;
	case IP_USER_FLOW:
		rule->tuples.src_ip[3] =
				be32_to_cpu(fs->h_u.usr_ip4_spec.ip4src);
		rule->tuples_mask.src_ip[3] =
				be32_to_cpu(fs->m_u.usr_ip4_spec.ip4src);

		rule->tuples.dst_ip[3] =
				be32_to_cpu(fs->h_u.usr_ip4_spec.ip4dst);
		rule->tuples_mask.dst_ip[3] =
				be32_to_cpu(fs->m_u.usr_ip4_spec.ip4dst);

		rule->tuples.ip_tos = fs->h_u.usr_ip4_spec.tos;
		rule->tuples_mask.ip_tos = fs->m_u.usr_ip4_spec.tos;

		rule->tuples.ip_proto = fs->h_u.usr_ip4_spec.proto;
		rule->tuples_mask.ip_proto = fs->m_u.usr_ip4_spec.proto;

		rule->tuples.ether_proto = ETH_P_IP;
		rule->tuples_mask.ether_proto = 0xFFFF;

		break;
	case SCTP_V6_FLOW:
	case TCP_V6_FLOW:
	case UDP_V6_FLOW:
		be32_to_cpu_array(rule->tuples.src_ip,
				  fs->h_u.tcp_ip6_spec.ip6src, 4);
		be32_to_cpu_array(rule->tuples_mask.src_ip,
				  fs->m_u.tcp_ip6_spec.ip6src, 4);

		be32_to_cpu_array(rule->tuples.dst_ip,
				  fs->h_u.tcp_ip6_spec.ip6dst, 4);
		be32_to_cpu_array(rule->tuples_mask.dst_ip,
				  fs->m_u.tcp_ip6_spec.ip6dst, 4);

		rule->tuples.src_port = be16_to_cpu(fs->h_u.tcp_ip6_spec.psrc);
		rule->tuples_mask.src_port =
				be16_to_cpu(fs->m_u.tcp_ip6_spec.psrc);

		rule->tuples.dst_port = be16_to_cpu(fs->h_u.tcp_ip6_spec.pdst);
		rule->tuples_mask.dst_port =
				be16_to_cpu(fs->m_u.tcp_ip6_spec.pdst);

		rule->tuples.ether_proto = ETH_P_IPV6;
		rule->tuples_mask.ether_proto = 0xFFFF;

		break;
	case IPV6_USER_FLOW:
		be32_to_cpu_array(rule->tuples.src_ip,
				  fs->h_u.usr_ip6_spec.ip6src, 4);
		be32_to_cpu_array(rule->tuples_mask.src_ip,
				  fs->m_u.usr_ip6_spec.ip6src, 4);

		be32_to_cpu_array(rule->tuples.dst_ip,
				  fs->h_u.usr_ip6_spec.ip6dst, 4);
		be32_to_cpu_array(rule->tuples_mask.dst_ip,
				  fs->m_u.usr_ip6_spec.ip6dst, 4);

		rule->tuples.ip_proto = fs->h_u.usr_ip6_spec.l4_proto;
		rule->tuples_mask.ip_proto = fs->m_u.usr_ip6_spec.l4_proto;

		rule->tuples.ether_proto = ETH_P_IPV6;
		rule->tuples_mask.ether_proto = 0xFFFF;

		break;
	case ETHER_FLOW:
		ether_addr_copy(rule->tuples.src_mac,
				fs->h_u.ether_spec.h_source);
		ether_addr_copy(rule->tuples_mask.src_mac,
				fs->m_u.ether_spec.h_source);

		ether_addr_copy(rule->tuples.dst_mac,
				fs->h_u.ether_spec.h_dest);
		ether_addr_copy(rule->tuples_mask.dst_mac,
				fs->m_u.ether_spec.h_dest);

		rule->tuples.ether_proto =
				be16_to_cpu(fs->h_u.ether_spec.h_proto);
		rule->tuples_mask.ether_proto =
				be16_to_cpu(fs->m_u.ether_spec.h_proto);

		break;
	default:
		return -EOPNOTSUPP;
	}

	switch (flow_type) {
	case SCTP_V4_FLOW:
	case SCTP_V6_FLOW:
		rule->tuples.ip_proto = IPPROTO_SCTP;
		rule->tuples_mask.ip_proto = 0xFF;
		break;
	case TCP_V4_FLOW:
	case TCP_V6_FLOW:
		rule->tuples.ip_proto = IPPROTO_TCP;
		rule->tuples_mask.ip_proto = 0xFF;
		break;
	case UDP_V4_FLOW:
	case UDP_V6_FLOW:
		rule->tuples.ip_proto = IPPROTO_UDP;
		rule->tuples_mask.ip_proto = 0xFF;
		break;
	default:
		break;
	}

	if ((fs->flow_type & FLOW_EXT)) {
		rule->tuples.vlan_tag1 = be16_to_cpu(fs->h_ext.vlan_tci);
		rule->tuples_mask.vlan_tag1 = be16_to_cpu(fs->m_ext.vlan_tci);
	}

	if (fs->flow_type & FLOW_MAC_EXT) {
		ether_addr_copy(rule->tuples.dst_mac, fs->h_ext.h_dest);
		ether_addr_copy(rule->tuples_mask.dst_mac, fs->m_ext.h_dest);
	}

	return 0;
}
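/* hclge_add_fd_entry() below backs ethtool's ntuple interface.
 * Illustrative usage from userspace (the device name is an assumption):
 *
 *   ethtool -N eth0 flow-type tcp4 dst-ip 192.168.1.10 dst-port 80 \
 *           action 3 loc 1       # steer to queue 3, rule slot 1
 *   ethtool -N eth0 flow-type tcp4 src-ip 10.0.0.1 action -1
 *                                # action -1 == RX_CLS_FLOW_DISC (drop)
 */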
static int hclge_add_fd_entry(struct hnae3_handle *handle,
			      struct ethtool_rxnfc *cmd)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	u16 dst_vport_id = 0, q_index = 0;
	struct ethtool_rx_flow_spec *fs;
	struct hclge_fd_rule *rule;
	u32 unused = 0;
	u8 action;
	int ret;

	if (!hnae3_dev_fd_supported(hdev))
		return -EOPNOTSUPP;

	if (!hdev->fd_cfg.fd_en) {
		dev_warn(&hdev->pdev->dev,
			 "Please enable flow director first\n");
		return -EOPNOTSUPP;
	}

	fs = (struct ethtool_rx_flow_spec *)&cmd->fs;

	ret = hclge_fd_check_spec(hdev, fs, &unused);
	if (ret) {
		dev_err(&hdev->pdev->dev, "Check fd spec failed\n");
		return ret;
	}

	if (fs->ring_cookie == RX_CLS_FLOW_DISC) {
		action = HCLGE_FD_ACTION_DROP_PACKET;
	} else {
		u32 ring = ethtool_get_flow_spec_ring(fs->ring_cookie);
		u8 vf = ethtool_get_flow_spec_ring_vf(fs->ring_cookie);
		u16 tqps;

		/* validate vf before indexing hdev->vport[] */
		if (vf > hdev->num_req_vfs) {
			dev_err(&hdev->pdev->dev,
				"Error: vf id (%d) > max vf num (%d)\n",
				vf, hdev->num_req_vfs);
			return -EINVAL;
		}

		dst_vport_id = vf ? hdev->vport[vf].vport_id : vport->vport_id;
		tqps = vf ? hdev->vport[vf].alloc_tqps : vport->alloc_tqps;

		if (ring >= tqps) {
			dev_err(&hdev->pdev->dev,
				"Error: queue id (%d) > max tqp num (%d)\n",
				ring, tqps - 1);
			return -EINVAL;
		}

		action = HCLGE_FD_ACTION_ACCEPT_PACKET;
		q_index = ring;
	}

	rule = kzalloc(sizeof(*rule), GFP_KERNEL);
	if (!rule)
		return -ENOMEM;

	ret = hclge_fd_get_tuple(hdev, fs, rule);
	if (ret)
		goto free_rule;

	rule->flow_type = fs->flow_type;

	rule->location = fs->location;
	rule->unused_tuple = unused;
	rule->vf_id = dst_vport_id;
	rule->queue_id = q_index;
	rule->action = action;

	ret = hclge_config_action(hdev, HCLGE_FD_STAGE_1, rule);
	if (ret)
		goto free_rule;

	ret = hclge_config_key(hdev, HCLGE_FD_STAGE_1, rule);
	if (ret)
		goto free_rule;

	ret = hclge_fd_update_rule_list(hdev, rule, fs->location, true);
	if (ret)
		goto free_rule;

	return ret;

free_rule:
	kfree(rule);
	return ret;
}
static int hclge_del_fd_entry(struct hnae3_handle *handle,
			      struct ethtool_rxnfc *cmd)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	struct ethtool_rx_flow_spec *fs;
	int ret;

	if (!hnae3_dev_fd_supported(hdev))
		return -EOPNOTSUPP;

	fs = (struct ethtool_rx_flow_spec *)&cmd->fs;

	if (fs->location >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1])
		return -EINVAL;

	if (!hclge_fd_rule_exist(hdev, fs->location)) {
		dev_err(&hdev->pdev->dev,
			"Delete fail, rule %d is inexistent\n",
			fs->location);
		return -ENOENT;
	}

	ret = hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true,
				   fs->location, NULL, false);
	if (ret)
		return ret;

	return hclge_fd_update_rule_list(hdev, NULL, fs->location,
					 false);
}

static void hclge_del_all_fd_entries(struct hnae3_handle *handle,
				     bool clear_list)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	struct hclge_fd_rule *rule;
	struct hlist_node *node;

	if (!hnae3_dev_fd_supported(hdev))
		return;

	if (clear_list) {
		hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list,
					  rule_node) {
			hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true,
					     rule->location, NULL, false);
			hlist_del(&rule->rule_node);
			kfree(rule);
			hdev->hclge_fd_rule_num--;
		}
	} else {
		hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list,
					  rule_node)
			hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true,
					     rule->location, NULL, false);
	}
}

static int hclge_restore_fd_entries(struct hnae3_handle *handle)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	struct hclge_fd_rule *rule;
	struct hlist_node *node;
	int ret;

	/* Return ok here, because reset error handling will check this
	 * return value. If error is returned here, the reset process will
	 * fail.
	 */
	if (!hnae3_dev_fd_supported(hdev))
		return 0;

	hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
		ret = hclge_config_action(hdev, HCLGE_FD_STAGE_1, rule);
		if (!ret)
			ret = hclge_config_key(hdev, HCLGE_FD_STAGE_1, rule);

		if (ret) {
			dev_warn(&hdev->pdev->dev,
				 "Restore rule %d failed, remove it\n",
				 rule->location);
			hlist_del(&rule->rule_node);
			kfree(rule);
			hdev->hclge_fd_rule_num--;
		}
	}
	return 0;
}

static int hclge_get_fd_rule_cnt(struct hnae3_handle *handle,
				 struct ethtool_rxnfc *cmd)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	if (!hnae3_dev_fd_supported(hdev))
		return -EOPNOTSUPP;

	cmd->rule_cnt = hdev->hclge_fd_rule_num;
	cmd->data = hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1];

	return 0;
}
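/* hclge_get_fd_rule_info() below is the inverse of hclge_fd_get_tuple():
 * it rebuilds an ethtool_rx_flow_spec from the stored rule, reporting a
 * zero mask for any tuple recorded as unused.
 */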
static int hclge_get_fd_rule_info(struct hnae3_handle *handle,
				  struct ethtool_rxnfc *cmd)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_fd_rule *rule = NULL;
	struct hclge_dev *hdev = vport->back;
	struct ethtool_rx_flow_spec *fs;
	struct hlist_node *node2;

	if (!hnae3_dev_fd_supported(hdev))
		return -EOPNOTSUPP;

	fs = (struct ethtool_rx_flow_spec *)&cmd->fs;

	hlist_for_each_entry_safe(rule, node2, &hdev->fd_rule_list, rule_node) {
		if (rule->location >= fs->location)
			break;
	}

	if (!rule || fs->location != rule->location)
		return -ENOENT;

	fs->flow_type = rule->flow_type;
	switch (fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) {
	case SCTP_V4_FLOW:
	case TCP_V4_FLOW:
	case UDP_V4_FLOW:
		fs->h_u.tcp_ip4_spec.ip4src =
				cpu_to_be32(rule->tuples.src_ip[3]);
		fs->m_u.tcp_ip4_spec.ip4src =
				rule->unused_tuple & BIT(INNER_SRC_IP) ?
				0 : cpu_to_be32(rule->tuples_mask.src_ip[3]);

		fs->h_u.tcp_ip4_spec.ip4dst =
				cpu_to_be32(rule->tuples.dst_ip[3]);
		fs->m_u.tcp_ip4_spec.ip4dst =
				rule->unused_tuple & BIT(INNER_DST_IP) ?
				0 : cpu_to_be32(rule->tuples_mask.dst_ip[3]);

		fs->h_u.tcp_ip4_spec.psrc = cpu_to_be16(rule->tuples.src_port);
		fs->m_u.tcp_ip4_spec.psrc =
				rule->unused_tuple & BIT(INNER_SRC_PORT) ?
				0 : cpu_to_be16(rule->tuples_mask.src_port);

		fs->h_u.tcp_ip4_spec.pdst = cpu_to_be16(rule->tuples.dst_port);
		fs->m_u.tcp_ip4_spec.pdst =
				rule->unused_tuple & BIT(INNER_DST_PORT) ?
				0 : cpu_to_be16(rule->tuples_mask.dst_port);

		fs->h_u.tcp_ip4_spec.tos = rule->tuples.ip_tos;
		fs->m_u.tcp_ip4_spec.tos =
				rule->unused_tuple & BIT(INNER_IP_TOS) ?
				0 : rule->tuples_mask.ip_tos;

		break;
	case IP_USER_FLOW:
		fs->h_u.usr_ip4_spec.ip4src =
				cpu_to_be32(rule->tuples.src_ip[3]);
		fs->m_u.usr_ip4_spec.ip4src =
				rule->unused_tuple & BIT(INNER_SRC_IP) ?
				0 : cpu_to_be32(rule->tuples_mask.src_ip[3]);

		fs->h_u.usr_ip4_spec.ip4dst =
				cpu_to_be32(rule->tuples.dst_ip[3]);
		fs->m_u.usr_ip4_spec.ip4dst =
				rule->unused_tuple & BIT(INNER_DST_IP) ?
				0 : cpu_to_be32(rule->tuples_mask.dst_ip[3]);

		fs->h_u.usr_ip4_spec.tos = rule->tuples.ip_tos;
		fs->m_u.usr_ip4_spec.tos =
				rule->unused_tuple & BIT(INNER_IP_TOS) ?
				0 : rule->tuples_mask.ip_tos;

		fs->h_u.usr_ip4_spec.proto = rule->tuples.ip_proto;
		fs->m_u.usr_ip4_spec.proto =
				rule->unused_tuple & BIT(INNER_IP_PROTO) ?
				0 : rule->tuples_mask.ip_proto;

		fs->h_u.usr_ip4_spec.ip_ver = ETH_RX_NFC_IP4;

		break;
	case SCTP_V6_FLOW:
	case TCP_V6_FLOW:
	case UDP_V6_FLOW:
		cpu_to_be32_array(fs->h_u.tcp_ip6_spec.ip6src,
				  rule->tuples.src_ip, 4);
		if (rule->unused_tuple & BIT(INNER_SRC_IP))
			memset(fs->m_u.tcp_ip6_spec.ip6src, 0, sizeof(int) * 4);
		else
			cpu_to_be32_array(fs->m_u.tcp_ip6_spec.ip6src,
					  rule->tuples_mask.src_ip, 4);

		cpu_to_be32_array(fs->h_u.tcp_ip6_spec.ip6dst,
				  rule->tuples.dst_ip, 4);
		if (rule->unused_tuple & BIT(INNER_DST_IP))
			memset(fs->m_u.tcp_ip6_spec.ip6dst, 0, sizeof(int) * 4);
		else
			cpu_to_be32_array(fs->m_u.tcp_ip6_spec.ip6dst,
					  rule->tuples_mask.dst_ip, 4);

		fs->h_u.tcp_ip6_spec.psrc = cpu_to_be16(rule->tuples.src_port);
		fs->m_u.tcp_ip6_spec.psrc =
				rule->unused_tuple & BIT(INNER_SRC_PORT) ?
				0 : cpu_to_be16(rule->tuples_mask.src_port);

		fs->h_u.tcp_ip6_spec.pdst = cpu_to_be16(rule->tuples.dst_port);
		fs->m_u.tcp_ip6_spec.pdst =
				rule->unused_tuple & BIT(INNER_DST_PORT) ?
				0 : cpu_to_be16(rule->tuples_mask.dst_port);

		break;
	case IPV6_USER_FLOW:
		cpu_to_be32_array(fs->h_u.usr_ip6_spec.ip6src,
				  rule->tuples.src_ip, 4);
		if (rule->unused_tuple & BIT(INNER_SRC_IP))
			memset(fs->m_u.usr_ip6_spec.ip6src, 0, sizeof(int) * 4);
		else
			cpu_to_be32_array(fs->m_u.usr_ip6_spec.ip6src,
					  rule->tuples_mask.src_ip, 4);

		cpu_to_be32_array(fs->h_u.usr_ip6_spec.ip6dst,
				  rule->tuples.dst_ip, 4);
		if (rule->unused_tuple & BIT(INNER_DST_IP))
			memset(fs->m_u.usr_ip6_spec.ip6dst, 0, sizeof(int) * 4);
		else
			cpu_to_be32_array(fs->m_u.usr_ip6_spec.ip6dst,
					  rule->tuples_mask.dst_ip, 4);

		fs->h_u.usr_ip6_spec.l4_proto = rule->tuples.ip_proto;
		fs->m_u.usr_ip6_spec.l4_proto =
				rule->unused_tuple & BIT(INNER_IP_PROTO) ?
				0 : rule->tuples_mask.ip_proto;

		break;
	case ETHER_FLOW:
		ether_addr_copy(fs->h_u.ether_spec.h_source,
				rule->tuples.src_mac);
		if (rule->unused_tuple & BIT(INNER_SRC_MAC))
			eth_zero_addr(fs->m_u.ether_spec.h_source);
		else
			ether_addr_copy(fs->m_u.ether_spec.h_source,
					rule->tuples_mask.src_mac);

		ether_addr_copy(fs->h_u.ether_spec.h_dest,
				rule->tuples.dst_mac);
		if (rule->unused_tuple & BIT(INNER_DST_MAC))
			eth_zero_addr(fs->m_u.ether_spec.h_dest);
		else
			ether_addr_copy(fs->m_u.ether_spec.h_dest,
					rule->tuples_mask.dst_mac);

		fs->h_u.ether_spec.h_proto =
				cpu_to_be16(rule->tuples.ether_proto);
		fs->m_u.ether_spec.h_proto =
				rule->unused_tuple & BIT(INNER_ETH_TYPE) ?
				0 : cpu_to_be16(rule->tuples_mask.ether_proto);

		break;
	default:
		return -EOPNOTSUPP;
	}

	if (fs->flow_type & FLOW_EXT) {
		fs->h_ext.vlan_tci = cpu_to_be16(rule->tuples.vlan_tag1);
		fs->m_ext.vlan_tci =
				rule->unused_tuple & BIT(INNER_VLAN_TAG_FST) ?
				cpu_to_be16(VLAN_VID_MASK) :
				cpu_to_be16(rule->tuples_mask.vlan_tag1);
	}

	if (fs->flow_type & FLOW_MAC_EXT) {
		ether_addr_copy(fs->h_ext.h_dest, rule->tuples.dst_mac);
		if (rule->unused_tuple & BIT(INNER_DST_MAC))
			eth_zero_addr(fs->m_ext.h_dest);
		else
			ether_addr_copy(fs->m_ext.h_dest,
					rule->tuples_mask.dst_mac);
	}

	if (rule->action == HCLGE_FD_ACTION_DROP_PACKET) {
		fs->ring_cookie = RX_CLS_FLOW_DISC;
	} else {
		u64 vf_id;

		fs->ring_cookie = rule->queue_id;
		vf_id = rule->vf_id;
		vf_id <<= ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF;
		fs->ring_cookie |= vf_id;
	}

	return 0;
}
static int hclge_get_all_rules(struct hnae3_handle *handle,
			       struct ethtool_rxnfc *cmd, u32 *rule_locs)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	struct hclge_fd_rule *rule;
	struct hlist_node *node2;
	int cnt = 0;

	if (!hnae3_dev_fd_supported(hdev))
		return -EOPNOTSUPP;

	cmd->data = hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1];

	hlist_for_each_entry_safe(rule, node2,
				  &hdev->fd_rule_list, rule_node) {
		if (cnt == cmd->rule_cnt)
			return -EMSGSIZE;

		rule_locs[cnt] = rule->location;
		cnt++;
	}

	cmd->rule_cnt = cnt;

	return 0;
}
static bool hclge_get_hw_reset_stat(struct hnae3_handle *handle)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	return hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG) ||
	       hclge_read_dev(&hdev->hw, HCLGE_FUN_RST_ING);
}

static bool hclge_ae_dev_resetting(struct hnae3_handle *handle)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	return test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
}

static unsigned long hclge_ae_dev_reset_cnt(struct hnae3_handle *handle)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	return hdev->reset_count;
}

static void hclge_enable_fd(struct hnae3_handle *handle, bool enable)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	hdev->fd_cfg.fd_en = enable;
	if (!enable)
		hclge_del_all_fd_entries(handle, false);
	else
		hclge_restore_fd_entries(handle);
}
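/* The MAC mode word below is assembled bit-by-bit in a local u32
 * (loop_en) and written with a single CONFIG_MAC_MODE command; TX/RX
 * enable, padding, FCS handling and oversize truncation all ride in the
 * same register.
 */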
static void hclge_cfg_mac_mode(struct hclge_dev *hdev, bool enable)
{
	struct hclge_desc desc;
	struct hclge_config_mac_mode_cmd *req =
		(struct hclge_config_mac_mode_cmd *)desc.data;
	u32 loop_en = 0;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, false);
	hnae3_set_bit(loop_en, HCLGE_MAC_TX_EN_B, enable);
	hnae3_set_bit(loop_en, HCLGE_MAC_RX_EN_B, enable);
	hnae3_set_bit(loop_en, HCLGE_MAC_PAD_TX_B, enable);
	hnae3_set_bit(loop_en, HCLGE_MAC_PAD_RX_B, enable);
	hnae3_set_bit(loop_en, HCLGE_MAC_1588_TX_B, 0);
	hnae3_set_bit(loop_en, HCLGE_MAC_1588_RX_B, 0);
	hnae3_set_bit(loop_en, HCLGE_MAC_APP_LP_B, 0);
	hnae3_set_bit(loop_en, HCLGE_MAC_LINE_LP_B, 0);
	hnae3_set_bit(loop_en, HCLGE_MAC_FCS_TX_B, enable);
	hnae3_set_bit(loop_en, HCLGE_MAC_RX_FCS_B, enable);
	hnae3_set_bit(loop_en, HCLGE_MAC_RX_FCS_STRIP_B, enable);
	hnae3_set_bit(loop_en, HCLGE_MAC_TX_OVERSIZE_TRUNCATE_B, enable);
	hnae3_set_bit(loop_en, HCLGE_MAC_RX_OVERSIZE_TRUNCATE_B, enable);
	hnae3_set_bit(loop_en, HCLGE_MAC_TX_UNDER_MIN_ERR_B, enable);
	req->txrx_pad_fcs_loop_en = cpu_to_le32(loop_en);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"mac enable fail, ret =%d.\n", ret);
}
static int hclge_set_app_loopback(struct hclge_dev *hdev, bool en)
{
	struct hclge_config_mac_mode_cmd *req;
	struct hclge_desc desc;
	u32 loop_en;
	int ret;

	req = (struct hclge_config_mac_mode_cmd *)&desc.data[0];
	/* 1 Read out the MAC mode config at first */
	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, true);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"mac loopback get fail, ret =%d.\n", ret);
		return ret;
	}

	/* 2 Then setup the loopback flag */
	loop_en = le32_to_cpu(req->txrx_pad_fcs_loop_en);
	hnae3_set_bit(loop_en, HCLGE_MAC_APP_LP_B, en ? 1 : 0);
	hnae3_set_bit(loop_en, HCLGE_MAC_TX_EN_B, en ? 1 : 0);
	hnae3_set_bit(loop_en, HCLGE_MAC_RX_EN_B, en ? 1 : 0);

	req->txrx_pad_fcs_loop_en = cpu_to_le32(loop_en);

	/* 3 Config mac work mode with loopback flag
	 * and its original configure parameters
	 */
	hclge_cmd_reuse_desc(&desc, false);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"mac loopback set fail, ret =%d.\n", ret);
	return ret;
}
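/* The serdes loopback command completes asynchronously: the code below
 * polls the DONE bit up to HCLGE_SERDES_RETRY_NUM times with
 * HCLGE_SERDES_RETRY_MS sleeps in between, i.e. a worst-case budget of
 * 100 * 10 ms = 1 s.
 */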
static int hclge_set_serdes_loopback(struct hclge_dev *hdev, bool en,
				     enum hnae3_loop loop_mode)
{
#define HCLGE_SERDES_RETRY_MS	10
#define HCLGE_SERDES_RETRY_NUM	100
	struct hclge_serdes_lb_cmd *req;
	struct hclge_desc desc;
	int ret, i = 0;
	u8 loop_mode_b;

	req = (struct hclge_serdes_lb_cmd *)desc.data;
	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SERDES_LOOPBACK, false);

	switch (loop_mode) {
	case HNAE3_LOOP_SERIAL_SERDES:
		loop_mode_b = HCLGE_CMD_SERDES_SERIAL_INNER_LOOP_B;
		break;
	case HNAE3_LOOP_PARALLEL_SERDES:
		loop_mode_b = HCLGE_CMD_SERDES_PARALLEL_INNER_LOOP_B;
		break;
	default:
		dev_err(&hdev->pdev->dev,
			"unsupported serdes loopback mode %d\n", loop_mode);
		return -ENOTSUPP;
	}

	if (en) {
		req->enable = loop_mode_b;
		req->mask = loop_mode_b;
	} else {
		req->mask = loop_mode_b;
	}

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"serdes loopback set fail, ret = %d\n", ret);
		return ret;
	}

	do {
		msleep(HCLGE_SERDES_RETRY_MS);
		hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SERDES_LOOPBACK,
					   true);
		ret = hclge_cmd_send(&hdev->hw, &desc, 1);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"serdes loopback get, ret = %d\n", ret);
			return ret;
		}
	} while (++i < HCLGE_SERDES_RETRY_NUM &&
		 !(req->result & HCLGE_CMD_SERDES_DONE_B));

	if (!(req->result & HCLGE_CMD_SERDES_DONE_B)) {
		dev_err(&hdev->pdev->dev, "serdes loopback set timeout\n");
		return -EBUSY;
	} else if (!(req->result & HCLGE_CMD_SERDES_SUCCESS_B)) {
		dev_err(&hdev->pdev->dev, "serdes loopback set failed in fw\n");
		return -EIO;
	}

	hclge_cfg_mac_mode(hdev, en);
	return 0;
}
static int hclge_tqp_enable(struct hclge_dev *hdev, int tqp_id,
			    int stream_id, bool enable)
{
	struct hclge_desc desc;
	struct hclge_cfg_com_tqp_queue_cmd *req =
		(struct hclge_cfg_com_tqp_queue_cmd *)desc.data;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_COM_TQP_QUEUE, false);
	req->tqp_id = cpu_to_le16(tqp_id & HCLGE_RING_ID_MASK);
	req->stream_id = cpu_to_le16(stream_id);
	req->enable |= enable << HCLGE_TQP_ENABLE_B;

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"Tqp enable fail, status =%d.\n", ret);
	return ret;
}
static int hclge_set_loopback(struct hnae3_handle *handle,
			      enum hnae3_loop loop_mode, bool en)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	int i, ret;

	switch (loop_mode) {
	case HNAE3_LOOP_APP:
		ret = hclge_set_app_loopback(hdev, en);
		break;
	case HNAE3_LOOP_SERIAL_SERDES:
	case HNAE3_LOOP_PARALLEL_SERDES:
		ret = hclge_set_serdes_loopback(hdev, en, loop_mode);
		break;
	default:
		ret = -ENOTSUPP;
		dev_err(&hdev->pdev->dev,
			"loop_mode %d is not supported\n", loop_mode);
		break;
	}

	for (i = 0; i < vport->alloc_tqps; i++) {
		ret = hclge_tqp_enable(hdev, i, 0, en);
		if (ret)
			return ret;
	}

	return 0;
}
static void hclge_reset_tqp_stats(struct hnae3_handle *handle)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hnae3_queue *queue;
	struct hclge_tqp *tqp;
	int i;

	for (i = 0; i < vport->alloc_tqps; i++) {
		queue = handle->kinfo.tqp[i];
		tqp = container_of(queue, struct hclge_tqp, q);
		memset(&tqp->tqp_stats, 0, sizeof(tqp->tqp_stats));
	}
}

static int hclge_ae_start(struct hnae3_handle *handle)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	/* mac enable */
	hclge_cfg_mac_mode(hdev, true);
	clear_bit(HCLGE_STATE_DOWN, &hdev->state);
	mod_timer(&hdev->service_timer, jiffies + HZ);
	hdev->hw.mac.link = 0;

	/* reset tqp stats */
	hclge_reset_tqp_stats(handle);

	hclge_mac_start_phy(hdev);

	return 0;
}

static void hclge_ae_stop(struct hnae3_handle *handle)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	set_bit(HCLGE_STATE_DOWN, &hdev->state);

	del_timer_sync(&hdev->service_timer);
	cancel_work_sync(&hdev->service_task);
	clear_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state);

	/* If it is not PF reset, the firmware will disable the MAC,
	 * so it only need to stop phy here.
	 */
	if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) &&
	    hdev->reset_type != HNAE3_FUNC_RESET) {
		hclge_mac_stop_phy(hdev);
		return;
	}

	/* Mac disable */
	hclge_cfg_mac_mode(hdev, false);

	hclge_mac_stop_phy(hdev);

	/* reset tqp stats */
	hclge_reset_tqp_stats(handle);
	del_timer_sync(&hdev->service_timer);
	cancel_work_sync(&hdev->service_task);
	hclge_update_link_status(hdev);
}

int hclge_vport_start(struct hclge_vport *vport)
{
	set_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
	vport->last_active_jiffies = jiffies;
	return 0;
}

void hclge_vport_stop(struct hclge_vport *vport)
{
	clear_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
}

static int hclge_client_start(struct hnae3_handle *handle)
{
	struct hclge_vport *vport = hclge_get_vport(handle);

	return hclge_vport_start(vport);
}

static void hclge_client_stop(struct hnae3_handle *handle)
{
	struct hclge_vport *vport = hclge_get_vport(handle);

	hclge_vport_stop(vport);
}
static int hclge_get_mac_vlan_cmd_status(struct hclge_vport *vport,
					 u16 cmdq_resp, u8 resp_code,
					 enum hclge_mac_vlan_tbl_opcode op)
{
	struct hclge_dev *hdev = vport->back;
	int return_status = -EIO;

	if (cmdq_resp) {
		dev_err(&hdev->pdev->dev,
			"cmdq execute failed for get_mac_vlan_cmd_status,status=%d.\n",
			cmdq_resp);
		return -EIO;
	}

	if (op == HCLGE_MAC_VLAN_ADD) {
		if ((!resp_code) || (resp_code == 1)) {
			return_status = 0;
		} else if (resp_code == 2) {
			return_status = -ENOSPC;
			dev_err(&hdev->pdev->dev,
				"add mac addr failed for uc_overflow.\n");
		} else if (resp_code == 3) {
			return_status = -ENOSPC;
			dev_err(&hdev->pdev->dev,
				"add mac addr failed for mc_overflow.\n");
		} else {
			dev_err(&hdev->pdev->dev,
				"add mac addr failed for undefined, code=%d.\n",
				resp_code);
		}
	} else if (op == HCLGE_MAC_VLAN_REMOVE) {
		if (!resp_code) {
			return_status = 0;
		} else if (resp_code == 1) {
			return_status = -ENOENT;
			dev_dbg(&hdev->pdev->dev,
				"remove mac addr failed for miss.\n");
		} else {
			dev_err(&hdev->pdev->dev,
				"remove mac addr failed for undefined, code=%d.\n",
				resp_code);
		}
	} else if (op == HCLGE_MAC_VLAN_LKUP) {
		if (!resp_code) {
			return_status = 0;
		} else if (resp_code == 1) {
			return_status = -ENOENT;
			dev_dbg(&hdev->pdev->dev,
				"lookup mac addr failed for miss.\n");
		} else {
			dev_err(&hdev->pdev->dev,
				"lookup mac addr failed for undefined, code=%d.\n",
				resp_code);
		}
	} else {
		return_status = -EINVAL;
		dev_err(&hdev->pdev->dev,
			"unknown opcode for get_mac_vlan_cmd_status,opcode=%d.\n",
			op);
	}

	return return_status;
}
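/* VFID bitmap layout used by hclge_update_desc_vfid() below: vfids 0-191
 * live in desc[1].data (6 x 32-bit words), vfids 192-255 in desc[2].data.
 * Example: vfid 200 maps to desc[2].data[0], bit 8.
 */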
static int hclge_update_desc_vfid(struct hclge_desc *desc, int vfid, bool clr)
{
	int word_num;
	int bit_num;

	if (vfid > 255 || vfid < 0)
		return -EIO;

	if (vfid >= 0 && vfid <= 191) {
		word_num = vfid / 32;
		bit_num  = vfid % 32;
		if (clr)
			desc[1].data[word_num] &= cpu_to_le32(~(1 << bit_num));
		else
			desc[1].data[word_num] |= cpu_to_le32(1 << bit_num);
	} else {
		word_num = (vfid - 192) / 32;
		bit_num  = vfid % 32;
		if (clr)
			desc[2].data[word_num] &= cpu_to_le32(~(1 << bit_num));
		else
			desc[2].data[word_num] |= cpu_to_le32(1 << bit_num);
	}

	return 0;
}
static bool hclge_is_all_function_id_zero(struct hclge_desc *desc)
{
#define HCLGE_DESC_NUMBER 3
#define HCLGE_FUNC_NUMBER_PER_DESC 6
	int i, j;

	/* desc[0] holds the table entry itself, so only scan the vfid
	 * bitmap words in desc[1] and desc[2].
	 */
	for (i = 1; i < HCLGE_DESC_NUMBER; i++)
		for (j = 0; j < HCLGE_FUNC_NUMBER_PER_DESC; j++)
			if (desc[i].data[j])
				return false;

	return true;
}
static void hclge_prepare_mac_addr(struct hclge_mac_vlan_tbl_entry_cmd *new_req,
				   const u8 *addr)
{
	const unsigned char *mac_addr = addr;
	u32 high_val = mac_addr[2] << 16 | (mac_addr[3] << 24) |
		       (mac_addr[0]) | (mac_addr[1] << 8);
	u32 low_val  = mac_addr[4] | (mac_addr[5] << 8);

	new_req->mac_addr_hi32 = cpu_to_le32(high_val);
	new_req->mac_addr_lo16 = cpu_to_le16(low_val & 0xffff);
}
static int hclge_remove_mac_vlan_tbl(struct hclge_vport *vport,
				     struct hclge_mac_vlan_tbl_entry_cmd *req)
{
	struct hclge_dev *hdev = vport->back;
	struct hclge_desc desc;
	u8 resp_code;
	u16 retval;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_REMOVE, false);

	memcpy(desc.data, req, sizeof(struct hclge_mac_vlan_tbl_entry_cmd));

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"del mac addr failed for cmd_send, ret =%d.\n",
			ret);
		return ret;
	}
	resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
	retval = le16_to_cpu(desc.retval);

	return hclge_get_mac_vlan_cmd_status(vport, retval, resp_code,
					     HCLGE_MAC_VLAN_REMOVE);
}

static int hclge_lookup_mac_vlan_tbl(struct hclge_vport *vport,
				     struct hclge_mac_vlan_tbl_entry_cmd *req,
				     struct hclge_desc *desc,
				     bool is_mc)
{
	struct hclge_dev *hdev = vport->back;
	u8 resp_code;
	u16 retval;
	int ret;

	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_MAC_VLAN_ADD, true);
	if (is_mc) {
		desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
		memcpy(desc[0].data,
		       req,
		       sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
		hclge_cmd_setup_basic_desc(&desc[1],
					   HCLGE_OPC_MAC_VLAN_ADD,
					   true);
		desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
		hclge_cmd_setup_basic_desc(&desc[2],
					   HCLGE_OPC_MAC_VLAN_ADD,
					   true);
		ret = hclge_cmd_send(&hdev->hw, desc, 3);
	} else {
		memcpy(desc[0].data,
		       req,
		       sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
		ret = hclge_cmd_send(&hdev->hw, desc, 1);
	}
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"lookup mac addr failed for cmd_send, ret =%d.\n",
			ret);
		return ret;
	}
	resp_code = (le32_to_cpu(desc[0].data[0]) >> 8) & 0xff;
	retval = le16_to_cpu(desc[0].retval);

	return hclge_get_mac_vlan_cmd_status(vport, retval, resp_code,
					     HCLGE_MAC_VLAN_LKUP);
}

static int hclge_add_mac_vlan_tbl(struct hclge_vport *vport,
				  struct hclge_mac_vlan_tbl_entry_cmd *req,
				  struct hclge_desc *mc_desc)
{
	struct hclge_dev *hdev = vport->back;
	int cfg_status;
	u8 resp_code;
	u16 retval;
	int ret;

	if (!mc_desc) {
		struct hclge_desc desc;

		hclge_cmd_setup_basic_desc(&desc,
					   HCLGE_OPC_MAC_VLAN_ADD,
					   false);
		memcpy(desc.data, req,
		       sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
		ret = hclge_cmd_send(&hdev->hw, &desc, 1);
		resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
		retval = le16_to_cpu(desc.retval);

		cfg_status = hclge_get_mac_vlan_cmd_status(vport, retval,
							   resp_code,
							   HCLGE_MAC_VLAN_ADD);
	} else {
		hclge_cmd_reuse_desc(&mc_desc[0], false);
		mc_desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
		hclge_cmd_reuse_desc(&mc_desc[1], false);
		mc_desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
		hclge_cmd_reuse_desc(&mc_desc[2], false);
		mc_desc[2].flag &= cpu_to_le16(~HCLGE_CMD_FLAG_NEXT);
		memcpy(mc_desc[0].data, req,
		       sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
		ret = hclge_cmd_send(&hdev->hw, mc_desc, 3);
		resp_code = (le32_to_cpu(mc_desc[0].data[0]) >> 8) & 0xff;
		retval = le16_to_cpu(mc_desc[0].retval);

		cfg_status = hclge_get_mac_vlan_cmd_status(vport, retval,
							   resp_code,
							   HCLGE_MAC_VLAN_ADD);
	}

	if (ret) {
		dev_err(&hdev->pdev->dev,
			"add mac addr failed for cmd_send, ret =%d.\n",
			ret);
		return ret;
	}

	return cfg_status;
}
static int hclge_init_umv_space(struct hclge_dev *hdev)
{
	u16 allocated_size = 0;
	int ret;

	ret = hclge_set_umv_space(hdev, hdev->wanted_umv_size, &allocated_size,
				  true);
	if (ret)
		return ret;

	if (allocated_size < hdev->wanted_umv_size)
		dev_warn(&hdev->pdev->dev,
			 "Alloc umv space failed, want %d, get %d\n",
			 hdev->wanted_umv_size, allocated_size);

	mutex_init(&hdev->umv_mutex);
	hdev->max_umv_size = allocated_size;
	hdev->priv_umv_size = hdev->max_umv_size / (hdev->num_req_vfs + 2);
	hdev->share_umv_size = hdev->priv_umv_size +
			hdev->max_umv_size % (hdev->num_req_vfs + 2);

	return 0;
}

static int hclge_uninit_umv_space(struct hclge_dev *hdev)
{
	int ret;

	if (hdev->max_umv_size > 0) {
		ret = hclge_set_umv_space(hdev, hdev->max_umv_size, NULL,
					  false);
		if (ret)
			return ret;

		hdev->max_umv_size = 0;
	}
	mutex_destroy(&hdev->umv_mutex);

	return 0;
}

static int hclge_set_umv_space(struct hclge_dev *hdev, u16 space_size,
			       u16 *allocated_size, bool is_alloc)
{
	struct hclge_umv_spc_alc_cmd *req;
	struct hclge_desc desc;
	int ret;

	req = (struct hclge_umv_spc_alc_cmd *)desc.data;
	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_ALLOCATE, false);
	hnae3_set_bit(req->allocate, HCLGE_UMV_SPC_ALC_B, !is_alloc);
	req->space_size = cpu_to_le32(space_size);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"%s umv space failed for cmd_send, ret =%d\n",
			is_alloc ? "allocate" : "free", ret);
		return ret;
	}

	if (is_alloc && allocated_size)
		*allocated_size = le32_to_cpu(desc.data[1]);

	return 0;
}
static void hclge_reset_umv_space(struct hclge_dev *hdev)
{
	struct hclge_vport *vport;
	int i;

	for (i = 0; i < hdev->num_alloc_vport; i++) {
		vport = &hdev->vport[i];
		vport->used_umv_num = 0;
	}

	mutex_lock(&hdev->umv_mutex);
	hdev->share_umv_size = hdev->priv_umv_size +
			hdev->max_umv_size % (hdev->num_req_vfs + 2);
	mutex_unlock(&hdev->umv_mutex);
}

static bool hclge_is_umv_space_full(struct hclge_vport *vport)
{
	struct hclge_dev *hdev = vport->back;
	bool is_full;

	mutex_lock(&hdev->umv_mutex);
	is_full = (vport->used_umv_num >= hdev->priv_umv_size &&
		   hdev->share_umv_size == 0);
	mutex_unlock(&hdev->umv_mutex);

	return is_full;
}

static void hclge_update_umv_space(struct hclge_vport *vport, bool is_free)
{
	struct hclge_dev *hdev = vport->back;

	mutex_lock(&hdev->umv_mutex);
	if (is_free) {
		if (vport->used_umv_num > hdev->priv_umv_size)
			hdev->share_umv_size++;
		vport->used_umv_num--;
	} else {
		if (vport->used_umv_num >= hdev->priv_umv_size)
			hdev->share_umv_size--;
		vport->used_umv_num++;
	}
	mutex_unlock(&hdev->umv_mutex);
}
static int hclge_add_uc_addr(struct hnae3_handle *handle,
			     const unsigned char *addr)
{
	struct hclge_vport *vport = hclge_get_vport(handle);

	return hclge_add_uc_addr_common(vport, addr);
}

int hclge_add_uc_addr_common(struct hclge_vport *vport,
			     const unsigned char *addr)
{
	struct hclge_dev *hdev = vport->back;
	struct hclge_mac_vlan_tbl_entry_cmd req;
	struct hclge_desc desc;
	u16 egress_port = 0;
	int ret;

	/* mac addr check */
	if (is_zero_ether_addr(addr) ||
	    is_broadcast_ether_addr(addr) ||
	    is_multicast_ether_addr(addr)) {
		dev_err(&hdev->pdev->dev,
			"Set_uc mac err! invalid mac:%pM. is_zero:%d,is_br=%d,is_mul=%d\n",
			 addr,
			 is_zero_ether_addr(addr),
			 is_broadcast_ether_addr(addr),
			 is_multicast_ether_addr(addr));
		return -EINVAL;
	}

	memset(&req, 0, sizeof(req));
	hnae3_set_bit(req.flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1);

	hnae3_set_field(egress_port, HCLGE_MAC_EPORT_VFID_M,
			HCLGE_MAC_EPORT_VFID_S, vport->vport_id);

	req.egress_port = cpu_to_le16(egress_port);

	hclge_prepare_mac_addr(&req, addr);

	/* Lookup the mac address in the mac_vlan table, and add
	 * it if the entry is inexistent. Repeated unicast entry
	 * is not allowed in the mac vlan table.
	 */
	ret = hclge_lookup_mac_vlan_tbl(vport, &req, &desc, false);
	if (ret == -ENOENT) {
		if (!hclge_is_umv_space_full(vport)) {
			ret = hclge_add_mac_vlan_tbl(vport, &req, NULL);
			if (!ret)
				hclge_update_umv_space(vport, false);
			return ret;
		}

		dev_err(&hdev->pdev->dev, "UC MAC table full(%u)\n",
			hdev->priv_umv_size);

		return -ENOSPC;
	}

	/* check if we just hit the duplicate */
	if (!ret)
		ret = -EINVAL;

	dev_err(&hdev->pdev->dev,
		"PF failed to add unicast entry(%pM) in the MAC table\n",
		addr);

	return ret;
}
static int hclge_rm_uc_addr(struct hnae3_handle *handle,
			    const unsigned char *addr)
{
	struct hclge_vport *vport = hclge_get_vport(handle);

	return hclge_rm_uc_addr_common(vport, addr);
}

int hclge_rm_uc_addr_common(struct hclge_vport *vport,
			    const unsigned char *addr)
{
	struct hclge_dev *hdev = vport->back;
	struct hclge_mac_vlan_tbl_entry_cmd req;
	int ret;

	/* mac addr check */
	if (is_zero_ether_addr(addr) ||
	    is_broadcast_ether_addr(addr) ||
	    is_multicast_ether_addr(addr)) {
		dev_dbg(&hdev->pdev->dev,
			"Remove mac err! invalid mac:%pM.\n",
			 addr);
		return -EINVAL;
	}

	memset(&req, 0, sizeof(req));
	hnae3_set_bit(req.flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
	hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
	hclge_prepare_mac_addr(&req, addr);
	ret = hclge_remove_mac_vlan_tbl(vport, &req);
	if (!ret)
		hclge_update_umv_space(vport, true);

	return ret;
}
static int hclge_add_mc_addr(struct hnae3_handle *handle,
			     const unsigned char *addr)
{
	struct hclge_vport *vport = hclge_get_vport(handle);

	return hclge_add_mc_addr_common(vport, addr);
}

int hclge_add_mc_addr_common(struct hclge_vport *vport,
			     const unsigned char *addr)
{
	struct hclge_dev *hdev = vport->back;
	struct hclge_mac_vlan_tbl_entry_cmd req;
	struct hclge_desc desc[3];
	int status;

	/* mac addr check */
	if (!is_multicast_ether_addr(addr)) {
		dev_err(&hdev->pdev->dev,
			"Add mc mac err! invalid mac:%pM.\n",
			 addr);
		return -EINVAL;
	}
	memset(&req, 0, sizeof(req));
	hnae3_set_bit(req.flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
	hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
	hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT1_EN_B, 1);
	hnae3_set_bit(req.mc_mac_en, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
	hclge_prepare_mac_addr(&req, addr);
	status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true);
	if (!status) {
		/* This mac addr exist, update VFID for it */
		hclge_update_desc_vfid(desc, vport->vport_id, false);
		status = hclge_add_mac_vlan_tbl(vport, &req, desc);
	} else {
		/* This mac addr do not exist, add new entry for it */
		memset(desc[0].data, 0, sizeof(desc[0].data));
		memset(desc[1].data, 0, sizeof(desc[0].data));
		memset(desc[2].data, 0, sizeof(desc[0].data));
		hclge_update_desc_vfid(desc, vport->vport_id, false);
		status = hclge_add_mac_vlan_tbl(vport, &req, desc);
	}

	if (status == -ENOSPC)
		dev_err(&hdev->pdev->dev, "mc mac vlan table is full\n");

	return status;
}
static int hclge_rm_mc_addr(struct hnae3_handle *handle,
			    const unsigned char *addr)
{
	struct hclge_vport *vport = hclge_get_vport(handle);

	return hclge_rm_mc_addr_common(vport, addr);
}

int hclge_rm_mc_addr_common(struct hclge_vport *vport,
			    const unsigned char *addr)
{
	struct hclge_dev *hdev = vport->back;
	struct hclge_mac_vlan_tbl_entry_cmd req;
	enum hclge_cmd_status status;
	struct hclge_desc desc[3];

	/* mac addr check */
	if (!is_multicast_ether_addr(addr)) {
		dev_dbg(&hdev->pdev->dev,
			"Remove mc mac err! invalid mac:%pM.\n",
			 addr);
		return -EINVAL;
	}

	memset(&req, 0, sizeof(req));
	hnae3_set_bit(req.flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
	hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
	hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT1_EN_B, 1);
	hnae3_set_bit(req.mc_mac_en, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
	hclge_prepare_mac_addr(&req, addr);
	status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true);
	if (!status) {
		/* This mac addr exist, remove this handle's VFID for it */
		hclge_update_desc_vfid(desc, vport->vport_id, true);

		if (hclge_is_all_function_id_zero(desc))
			/* All the vfid is zero, so need to delete this entry */
			status = hclge_remove_mac_vlan_tbl(vport, &req);
		else
			/* Not all the vfid is zero, update the vfid */
			status = hclge_add_mac_vlan_tbl(vport, &req, desc);

	} else {
		/* Maybe this mac address is in mta table, but it cannot be
		 * deleted here because an entry of mta represents an address
		 * range rather than a specific address. the delete action to
		 * all entries will take effect in update_mta_status called by
		 * hns3_nic_set_rx_mode.
		 */
		status = 0;
	}

	return status;
}
static int hclge_get_mac_ethertype_cmd_status(struct hclge_dev *hdev,
					      u16 cmdq_resp, u8 resp_code)
{
#define HCLGE_ETHERTYPE_SUCCESS_ADD		0
#define HCLGE_ETHERTYPE_ALREADY_ADD		1
#define HCLGE_ETHERTYPE_MGR_TBL_OVERFLOW	2
#define HCLGE_ETHERTYPE_KEY_CONFLICT		3

	int return_status = 0;

	if (cmdq_resp) {
		dev_err(&hdev->pdev->dev,
			"cmdq execute failed for get_mac_ethertype_cmd_status, status=%d.\n",
			cmdq_resp);
		return -EIO;
	}

	switch (resp_code) {
	case HCLGE_ETHERTYPE_SUCCESS_ADD:
	case HCLGE_ETHERTYPE_ALREADY_ADD:
		return_status = 0;
		break;
	case HCLGE_ETHERTYPE_MGR_TBL_OVERFLOW:
		dev_err(&hdev->pdev->dev,
			"add mac ethertype failed for manager table overflow.\n");
		return_status = -EIO;
		break;
	case HCLGE_ETHERTYPE_KEY_CONFLICT:
		dev_err(&hdev->pdev->dev,
			"add mac ethertype failed for key conflict.\n");
		return_status = -EIO;
		break;
	default:
		dev_err(&hdev->pdev->dev,
			"add mac ethertype failed for undefined, code=%d.\n",
			resp_code);
		return_status = -EIO;
	}

	return return_status;
}

static int hclge_add_mgr_tbl(struct hclge_dev *hdev,
			     const struct hclge_mac_mgr_tbl_entry_cmd *req)
{
	struct hclge_desc desc;
	u8 resp_code;
	u16 retval;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_ETHTYPE_ADD, false);
	memcpy(desc.data, req, sizeof(struct hclge_mac_mgr_tbl_entry_cmd));

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"add mac ethertype failed for cmd_send, ret =%d.\n",
			ret);
		return ret;
	}

	resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
	retval = le16_to_cpu(desc.retval);

	return hclge_get_mac_ethertype_cmd_status(hdev, retval, resp_code);
}

static int init_mgr_tbl(struct hclge_dev *hdev)
{
	int ret;
	int i;

	for (i = 0; i < ARRAY_SIZE(hclge_mgr_table); i++) {
		ret = hclge_add_mgr_tbl(hdev, &hclge_mgr_table[i]);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"add mac ethertype failed, ret =%d.\n",
				ret);
			return ret;
		}
	}

	return 0;
}
static void hclge_get_mac_addr(struct hnae3_handle *handle, u8 *p)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	ether_addr_copy(p, hdev->hw.mac.mac_addr);
}

static int hclge_set_mac_addr(struct hnae3_handle *handle, void *p,
			      bool is_first)
{
	const unsigned char *new_addr = (const unsigned char *)p;
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	int ret;

	/* mac addr check */
	if (is_zero_ether_addr(new_addr) ||
	    is_broadcast_ether_addr(new_addr) ||
	    is_multicast_ether_addr(new_addr)) {
		dev_err(&hdev->pdev->dev,
			"Change uc mac err! invalid mac:%pM.\n",
			 new_addr);
		return -EINVAL;
	}

	if (!is_first && hclge_rm_uc_addr(handle, hdev->hw.mac.mac_addr))
		dev_warn(&hdev->pdev->dev,
			 "remove old uc mac address fail.\n");

	ret = hclge_add_uc_addr(handle, new_addr);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"add uc mac address fail, ret =%d.\n",
			ret);

		if (!is_first &&
		    hclge_add_uc_addr(handle, hdev->hw.mac.mac_addr))
			dev_err(&hdev->pdev->dev,
				"restore uc mac address fail.\n");

		return -EIO;
	}

	ret = hclge_pause_addr_cfg(hdev, new_addr);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"configure mac pause address fail, ret =%d.\n",
			ret);
		return -EIO;
	}

	ether_addr_copy(hdev->hw.mac.mac_addr, new_addr);

	return 0;
}

static int hclge_do_ioctl(struct hnae3_handle *handle, struct ifreq *ifr,
			  int cmd)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	if (!hdev->hw.mac.phydev)
		return -EOPNOTSUPP;

	return phy_mii_ioctl(hdev->hw.mac.phydev, ifr, cmd);
}
static int hclge_set_vlan_filter_ctrl(struct hclge_dev *hdev, u8 vlan_type,
				      u8 fe_type, bool filter_en)
{
	struct hclge_vlan_filter_ctrl_cmd *req;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_FILTER_CTRL, false);

	req = (struct hclge_vlan_filter_ctrl_cmd *)desc.data;
	req->vlan_type = vlan_type;
	req->vlan_fe = filter_en ? fe_type : 0;

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev, "set vlan filter fail, ret =%d.\n",
			ret);

	return ret;
}

#define HCLGE_FILTER_TYPE_VF		0
#define HCLGE_FILTER_TYPE_PORT		1
#define HCLGE_FILTER_FE_EGRESS_V1_B	BIT(0)
#define HCLGE_FILTER_FE_NIC_INGRESS_B	BIT(0)
#define HCLGE_FILTER_FE_NIC_EGRESS_B	BIT(1)
#define HCLGE_FILTER_FE_ROCE_INGRESS_B	BIT(2)
#define HCLGE_FILTER_FE_ROCE_EGRESS_B	BIT(3)
#define HCLGE_FILTER_FE_EGRESS		(HCLGE_FILTER_FE_NIC_EGRESS_B \
					| HCLGE_FILTER_FE_ROCE_EGRESS_B)
#define HCLGE_FILTER_FE_INGRESS		(HCLGE_FILTER_FE_NIC_INGRESS_B \
					| HCLGE_FILTER_FE_ROCE_INGRESS_B)
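/* Revision 0x21 hardware exposes separate NIC/RoCE ingress and egress
 * filter-enable bits (the FE_* defines above); revision 0x20 only has
 * the single V1 egress bit, hence the two code paths below.
 */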
static void hclge_enable_vlan_filter(struct hnae3_handle *handle, bool enable)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	if (hdev->pdev->revision >= 0x21) {
		hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
					   HCLGE_FILTER_FE_EGRESS, enable);
		hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_PORT,
					   HCLGE_FILTER_FE_INGRESS, enable);
	} else {
		hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
					   HCLGE_FILTER_FE_EGRESS_V1_B,
					   enable);
	}
	if (enable)
		handle->netdev_flags |= HNAE3_VLAN_FLTR;
	else
		handle->netdev_flags &= ~HNAE3_VLAN_FLTR;
}
static int hclge_set_vf_vlan_common(struct hclge_dev *hdev, int vfid,
				    bool is_kill, u16 vlan, u8 qos,
				    __be16 proto)
{
#define HCLGE_MAX_VF_BYTES  16
	struct hclge_vlan_filter_vf_cfg_cmd *req0;
	struct hclge_vlan_filter_vf_cfg_cmd *req1;
	struct hclge_desc desc[2];
	u8 vf_byte_val;
	u8 vf_byte_off;
	int ret;

	hclge_cmd_setup_basic_desc(&desc[0],
				   HCLGE_OPC_VLAN_FILTER_VF_CFG, false);
	hclge_cmd_setup_basic_desc(&desc[1],
				   HCLGE_OPC_VLAN_FILTER_VF_CFG, false);

	desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);

	vf_byte_off = vfid / 8;
	vf_byte_val = 1 << (vfid % 8);

	req0 = (struct hclge_vlan_filter_vf_cfg_cmd *)desc[0].data;
	req1 = (struct hclge_vlan_filter_vf_cfg_cmd *)desc[1].data;

	req0->vlan_id  = cpu_to_le16(vlan);
	req0->vlan_cfg = is_kill;

	if (vf_byte_off < HCLGE_MAX_VF_BYTES)
		req0->vf_bitmap[vf_byte_off] = vf_byte_val;
	else
		req1->vf_bitmap[vf_byte_off - HCLGE_MAX_VF_BYTES] = vf_byte_val;

	ret = hclge_cmd_send(&hdev->hw, desc, 2);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Send vf vlan command fail, ret =%d.\n",
			ret);
		return ret;
	}

	if (!is_kill) {
#define HCLGE_VF_VLAN_NO_ENTRY	2
		if (!req0->resp_code || req0->resp_code == 1)
			return 0;

		if (req0->resp_code == HCLGE_VF_VLAN_NO_ENTRY) {
			dev_warn(&hdev->pdev->dev,
				 "vf vlan table is full, vf vlan filter is disabled\n");
			return 0;
		}

		dev_err(&hdev->pdev->dev,
			"Add vf vlan filter fail, ret =%d.\n",
			req0->resp_code);
	} else {
#define HCLGE_VF_VLAN_DEL_NO_FOUND	1
		if (!req0->resp_code)
			return 0;

		if (req0->resp_code == HCLGE_VF_VLAN_DEL_NO_FOUND) {
			dev_warn(&hdev->pdev->dev,
				 "vlan %d filter is not in vf vlan table\n",
				 vlan);
			return 0;
		}

		dev_err(&hdev->pdev->dev,
			"Kill vf vlan filter fail, ret =%d.\n",
			req0->resp_code);
	}

	return -EIO;
}
static int hclge_set_port_vlan_filter(struct hclge_dev *hdev, __be16 proto,
				      u16 vlan_id, bool is_kill)
{
	struct hclge_vlan_filter_pf_cfg_cmd *req;
	struct hclge_desc desc;
	u8 vlan_offset_byte_val;
	u8 vlan_offset_byte;
	u8 vlan_offset_160;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_FILTER_PF_CFG, false);

	vlan_offset_160 = vlan_id / 160;
	vlan_offset_byte = (vlan_id % 160) / 8;
	vlan_offset_byte_val = 1 << (vlan_id % 8);

	req = (struct hclge_vlan_filter_pf_cfg_cmd *)desc.data;
	req->vlan_offset = vlan_offset_160;
	req->vlan_cfg = is_kill;
	req->vlan_offset_bitmap[vlan_offset_byte] = vlan_offset_byte_val;

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"port vlan command, send fail, ret =%d.\n", ret);
	return ret;
}
static int hclge_set_vlan_filter_hw(struct hclge_dev *hdev, __be16 proto,
				    u16 vport_id, u16 vlan_id, u8 qos,
				    bool is_kill)
{
	u16 vport_idx, vport_num = 0;
	int ret;

	if (is_kill && !vlan_id)
		return 0;

	ret = hclge_set_vf_vlan_common(hdev, vport_id, is_kill, vlan_id,
				       0, proto);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Set %d vport vlan filter config fail, ret =%d.\n",
			vport_id, ret);
		return ret;
	}

	/* vlan 0 may be added twice when 8021q module is enabled */
	if (!is_kill && !vlan_id &&
	    test_bit(vport_id, hdev->vlan_table[vlan_id]))
		return 0;

	if (!is_kill && test_and_set_bit(vport_id, hdev->vlan_table[vlan_id])) {
		dev_err(&hdev->pdev->dev,
			"Add port vlan failed, vport %d is already in vlan %d\n",
			vport_id, vlan_id);
		return -EINVAL;
	}

	if (is_kill &&
	    !test_and_clear_bit(vport_id, hdev->vlan_table[vlan_id])) {
		dev_err(&hdev->pdev->dev,
			"Delete port vlan failed, vport %d is not in vlan %d\n",
			vport_id, vlan_id);
		return -EINVAL;
	}

	for_each_set_bit(vport_idx, hdev->vlan_table[vlan_id], HCLGE_VPORT_NUM)
		vport_num++;

	if ((is_kill && vport_num == 0) || (!is_kill && vport_num == 1))
		ret = hclge_set_port_vlan_filter(hdev, proto, vlan_id,
						 is_kill);

	return ret;
}
int hclge_set_vlan_filter(struct hnae3_handle *handle, __be16 proto,
			  u16 vlan_id, bool is_kill)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	return hclge_set_vlan_filter_hw(hdev, proto, vport->vport_id, vlan_id,
					0, is_kill);
}
static int hclge_set_vf_vlan_filter(struct hnae3_handle *handle, int vfid,
				    u16 vlan, u8 qos, __be16 proto)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	if ((vfid >= hdev->num_alloc_vfs) || (vlan > 4095) || (qos > 7))
		return -EINVAL;
	if (proto != htons(ETH_P_8021Q))
		return -EPROTONOSUPPORT;

	return hclge_set_vlan_filter_hw(hdev, proto, vfid, vlan, qos, false);
}
static int hclge_set_vlan_tx_offload_cfg(struct hclge_vport *vport)
{
	struct hclge_tx_vtag_cfg *vcfg = &vport->txvlan_cfg;
	struct hclge_vport_vtag_tx_cfg_cmd *req;
	struct hclge_dev *hdev = vport->back;
	struct hclge_desc desc;
	int status;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_PORT_TX_CFG, false);

	req = (struct hclge_vport_vtag_tx_cfg_cmd *)desc.data;
	req->def_vlan_tag1 = cpu_to_le16(vcfg->default_tag1);
	req->def_vlan_tag2 = cpu_to_le16(vcfg->default_tag2);
	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_TAG1_B,
		      vcfg->accept_tag1 ? 1 : 0);
	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_UNTAG1_B,
		      vcfg->accept_untag1 ? 1 : 0);
	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_TAG2_B,
		      vcfg->accept_tag2 ? 1 : 0);
	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_UNTAG2_B,
		      vcfg->accept_untag2 ? 1 : 0);
	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_PORT_INS_TAG1_EN_B,
		      vcfg->insert_tag1_en ? 1 : 0);
	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_PORT_INS_TAG2_EN_B,
		      vcfg->insert_tag2_en ? 1 : 0);
	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_CFG_NIC_ROCE_SEL_B, 0);

	req->vf_offset = vport->vport_id / HCLGE_VF_NUM_PER_CMD;
	req->vf_bitmap[req->vf_offset] =
		1 << (vport->vport_id % HCLGE_VF_NUM_PER_BYTE);

	status = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (status)
		dev_err(&hdev->pdev->dev,
			"Send port txvlan cfg command fail, ret =%d\n",
			status);

	return status;
}

static int hclge_set_vlan_rx_offload_cfg(struct hclge_vport *vport)
{
	struct hclge_rx_vtag_cfg *vcfg = &vport->rxvlan_cfg;
	struct hclge_vport_vtag_rx_cfg_cmd *req;
	struct hclge_dev *hdev = vport->back;
	struct hclge_desc desc;
	int status;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_PORT_RX_CFG, false);

	req = (struct hclge_vport_vtag_rx_cfg_cmd *)desc.data;
	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_REM_TAG1_EN_B,
		      vcfg->strip_tag1_en ? 1 : 0);
	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_REM_TAG2_EN_B,
		      vcfg->strip_tag2_en ? 1 : 0);
	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_SHOW_TAG1_EN_B,
		      vcfg->vlan1_vlan_prionly ? 1 : 0);
	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_SHOW_TAG2_EN_B,
		      vcfg->vlan2_vlan_prionly ? 1 : 0);

	req->vf_offset = vport->vport_id / HCLGE_VF_NUM_PER_CMD;
	req->vf_bitmap[req->vf_offset] =
		1 << (vport->vport_id % HCLGE_VF_NUM_PER_BYTE);

	status = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (status)
		dev_err(&hdev->pdev->dev,
			"Send port rxvlan cfg command fail, ret =%d\n",
			status);

	return status;
}
static int hclge_set_vlan_protocol_type(struct hclge_dev *hdev)
{
	struct hclge_rx_vlan_type_cfg_cmd *rx_req;
	struct hclge_tx_vlan_type_cfg_cmd *tx_req;
	struct hclge_desc desc;
	int status;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_TYPE_ID, false);
	rx_req = (struct hclge_rx_vlan_type_cfg_cmd *)desc.data;
	rx_req->ot_fst_vlan_type =
		cpu_to_le16(hdev->vlan_type_cfg.rx_ot_fst_vlan_type);
	rx_req->ot_sec_vlan_type =
		cpu_to_le16(hdev->vlan_type_cfg.rx_ot_sec_vlan_type);
	rx_req->in_fst_vlan_type =
		cpu_to_le16(hdev->vlan_type_cfg.rx_in_fst_vlan_type);
	rx_req->in_sec_vlan_type =
		cpu_to_le16(hdev->vlan_type_cfg.rx_in_sec_vlan_type);

	status = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (status) {
		dev_err(&hdev->pdev->dev,
			"Send rxvlan protocol type command fail, ret =%d\n",
			status);
		return status;
	}

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_INSERT, false);

	tx_req = (struct hclge_tx_vlan_type_cfg_cmd *)desc.data;
	tx_req->ot_vlan_type = cpu_to_le16(hdev->vlan_type_cfg.tx_ot_vlan_type);
	tx_req->in_vlan_type = cpu_to_le16(hdev->vlan_type_cfg.tx_in_vlan_type);

	status = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (status)
		dev_err(&hdev->pdev->dev,
			"Send txvlan protocol type command fail, ret =%d\n",
			status);

	return status;
}
static int hclge_init_vlan_config(struct hclge_dev *hdev)
{
#define HCLGE_DEF_VLAN_TYPE		0x8100

	struct hnae3_handle *handle = &hdev->vport[0].nic;
	struct hclge_vport *vport;
	int ret;
	int i;

	if (hdev->pdev->revision >= 0x21) {
		ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
						 HCLGE_FILTER_FE_EGRESS, true);
		if (ret)
			return ret;

		ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_PORT,
						 HCLGE_FILTER_FE_INGRESS, true);
		if (ret)
			return ret;
	} else {
		ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
						 HCLGE_FILTER_FE_EGRESS_V1_B,
						 true);
		if (ret)
			return ret;
	}

	handle->netdev_flags |= HNAE3_VLAN_FLTR;

	hdev->vlan_type_cfg.rx_in_fst_vlan_type = HCLGE_DEF_VLAN_TYPE;
	hdev->vlan_type_cfg.rx_in_sec_vlan_type = HCLGE_DEF_VLAN_TYPE;
	hdev->vlan_type_cfg.rx_ot_fst_vlan_type = HCLGE_DEF_VLAN_TYPE;
	hdev->vlan_type_cfg.rx_ot_sec_vlan_type = HCLGE_DEF_VLAN_TYPE;
	hdev->vlan_type_cfg.tx_ot_vlan_type = HCLGE_DEF_VLAN_TYPE;
	hdev->vlan_type_cfg.tx_in_vlan_type = HCLGE_DEF_VLAN_TYPE;

	ret = hclge_set_vlan_protocol_type(hdev);
	if (ret)
		return ret;

	for (i = 0; i < hdev->num_alloc_vport; i++) {
		vport = &hdev->vport[i];
		vport->txvlan_cfg.accept_tag1 = true;
		vport->txvlan_cfg.accept_untag1 = true;

		/* accept_tag2 and accept_untag2 are not supported on
		 * pdev revision(0x20); newer revisions support them. The
		 * values of these two fields will not return an error when
		 * the driver sends commands to firmware on revision(0x20).
		 * These two fields cannot be configured by the user.
		 */
		vport->txvlan_cfg.accept_tag2 = true;
		vport->txvlan_cfg.accept_untag2 = true;

		vport->txvlan_cfg.insert_tag1_en = false;
		vport->txvlan_cfg.insert_tag2_en = false;
		vport->txvlan_cfg.default_tag1 = 0;
		vport->txvlan_cfg.default_tag2 = 0;

		ret = hclge_set_vlan_tx_offload_cfg(vport);
		if (ret)
			return ret;

		vport->rxvlan_cfg.strip_tag1_en = false;
		vport->rxvlan_cfg.strip_tag2_en = true;
		vport->rxvlan_cfg.vlan1_vlan_prionly = false;
		vport->rxvlan_cfg.vlan2_vlan_prionly = false;

		ret = hclge_set_vlan_rx_offload_cfg(vport);
		if (ret)
			return ret;
	}

	return hclge_set_vlan_filter(handle, htons(ETH_P_8021Q), 0, false);
}
int hclge_en_hw_strip_rxvtag(struct hnae3_handle *handle, bool enable)
{
	struct hclge_vport *vport = hclge_get_vport(handle);

	vport->rxvlan_cfg.strip_tag1_en = false;
	vport->rxvlan_cfg.strip_tag2_en = enable;
	vport->rxvlan_cfg.vlan1_vlan_prionly = false;
	vport->rxvlan_cfg.vlan2_vlan_prionly = false;

	return hclge_set_vlan_rx_offload_cfg(vport);
}
static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mps)
{
	struct hclge_config_max_frm_size_cmd *req;
	struct hclge_desc desc;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAX_FRM_SIZE, false);

	req = (struct hclge_config_max_frm_size_cmd *)desc.data;
	req->max_frm_size = cpu_to_le16(new_mps);
	req->min_frm_size = HCLGE_MAC_MIN_FRAME;

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}
static int hclge_set_mtu(struct hnae3_handle *handle, int new_mtu)
{
	struct hclge_vport *vport = hclge_get_vport(handle);

	return hclge_set_vport_mtu(vport, new_mtu);
}
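/* Worked example for the frame-size math in hclge_set_vport_mtu():
 * with the standard sizes ETH_HLEN = 14, ETH_FCS_LEN = 4 and
 * VLAN_HLEN = 4, an MTU of 1500 yields
 *   max_frm_size = 1500 + 14 + 4 + 2 * 4 = 1526 bytes,
 * i.e. payload + L2 header + FCS + room for two stacked VLAN tags.
 */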
int hclge_set_vport_mtu(struct hclge_vport *vport, int new_mtu)
{
	struct hclge_dev *hdev = vport->back;
	int i, max_frm_size, ret = 0;

	max_frm_size = new_mtu + ETH_HLEN + ETH_FCS_LEN + 2 * VLAN_HLEN;
	if (max_frm_size < HCLGE_MAC_MIN_FRAME ||
	    max_frm_size > HCLGE_MAC_MAX_FRAME)
		return -EINVAL;

	max_frm_size = max(max_frm_size, HCLGE_MAC_DEFAULT_FRAME);
	mutex_lock(&hdev->vport_lock);
	/* VF's mps must fit within hdev->mps */
	if (vport->vport_id && max_frm_size > hdev->mps) {
		mutex_unlock(&hdev->vport_lock);
		return -EINVAL;
	} else if (vport->vport_id) {
		vport->mps = max_frm_size;
		mutex_unlock(&hdev->vport_lock);
		return 0;
	}

	/* PF's mps must be greater than the mps of every VF */
	for (i = 1; i < hdev->num_alloc_vport; i++)
		if (max_frm_size < hdev->vport[i].mps) {
			mutex_unlock(&hdev->vport_lock);
			return -EINVAL;
		}

	hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);

	ret = hclge_set_mac_mtu(hdev, max_frm_size);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Change mtu fail, ret =%d\n", ret);
		goto out;
	}

	hdev->mps = max_frm_size;
	vport->mps = max_frm_size;

	ret = hclge_buffer_alloc(hdev);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"Allocate buffer fail, ret =%d\n", ret);

out:
	hclge_notify_client(hdev, HNAE3_UP_CLIENT);
	mutex_unlock(&hdev->vport_lock);
	return ret;
}
static int hclge_send_reset_tqp_cmd(struct hclge_dev *hdev, u16 queue_id,
				    bool enable)
{
	struct hclge_reset_tqp_queue_cmd *req;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RESET_TQP_QUEUE, false);

	req = (struct hclge_reset_tqp_queue_cmd *)desc.data;
	req->tqp_id = cpu_to_le16(queue_id & HCLGE_RING_ID_MASK);
	hnae3_set_bit(req->reset_req, HCLGE_TQP_RESET_B, enable);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"Send tqp reset cmd error, status =%d\n", ret);

	return ret;
}
static int hclge_get_reset_status(struct hclge_dev *hdev, u16 queue_id)
{
	struct hclge_reset_tqp_queue_cmd *req;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RESET_TQP_QUEUE, true);

	req = (struct hclge_reset_tqp_queue_cmd *)desc.data;
	req->tqp_id = cpu_to_le16(queue_id & HCLGE_RING_ID_MASK);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Get reset status error, status =%d\n", ret);
		return ret;
	}

	return hnae3_get_bit(req->ready_to_reset, HCLGE_TQP_RESET_B);
}
static u16 hclge_covert_handle_qid_global(struct hnae3_handle *handle,
					  u16 queue_id)
{
	struct hnae3_queue *queue;
	struct hclge_tqp *tqp;

	queue = handle->kinfo.tqp[queue_id];
	tqp = container_of(queue, struct hclge_tqp, q);

	return tqp->index;
}
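/* TQP reset handshake implemented by hclge_reset_tqp() below:
 * disable the queue, assert the reset via HCLGE_OPC_RESET_TQP_QUEUE,
 * poll hclge_get_reset_status() until the firmware reports
 * ready_to_reset, then send the same command again with the bit
 * cleared to deassert the soft reset.
 */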
int hclge_reset_tqp(struct hnae3_handle *handle, u16 queue_id)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	int reset_try_times = 0;
	int reset_status;
	u16 queue_gid;
	int ret;

	queue_gid = hclge_covert_handle_qid_global(handle, queue_id);

	ret = hclge_tqp_enable(hdev, queue_id, 0, false);
	if (ret) {
		dev_err(&hdev->pdev->dev, "Disable tqp fail, ret = %d\n", ret);
		return ret;
	}

	ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, true);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Send reset tqp cmd fail, ret = %d\n", ret);
		return ret;
	}

	reset_try_times = 0;
	while (reset_try_times++ < HCLGE_TQP_RESET_TRY_TIMES) {
		/* Wait for tqp hw reset */
		msleep(20);
		reset_status = hclge_get_reset_status(hdev, queue_gid);
		if (reset_status)
			break;
	}

	if (reset_try_times >= HCLGE_TQP_RESET_TRY_TIMES) {
		dev_err(&hdev->pdev->dev, "Reset TQP fail\n");
		return ret;
	}

	ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, false);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"Deassert the soft reset fail, ret = %d\n", ret);

	return ret;
}
void hclge_reset_vf_queue(struct hclge_vport *vport, u16 queue_id)
{
	struct hclge_dev *hdev = vport->back;
	int reset_try_times = 0;
	int reset_status;
	u16 queue_gid;
	int ret;

	queue_gid = hclge_covert_handle_qid_global(&vport->nic, queue_id);

	ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, true);
	if (ret) {
		dev_warn(&hdev->pdev->dev,
			 "Send reset tqp cmd fail, ret = %d\n", ret);
		return;
	}

	reset_try_times = 0;
	while (reset_try_times++ < HCLGE_TQP_RESET_TRY_TIMES) {
		/* Wait for tqp hw reset */
		msleep(20);
		reset_status = hclge_get_reset_status(hdev, queue_gid);
		if (reset_status)
			break;
	}

	if (reset_try_times >= HCLGE_TQP_RESET_TRY_TIMES) {
		dev_warn(&hdev->pdev->dev, "Reset TQP fail\n");
		return;
	}

	ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, false);
	if (ret)
		dev_warn(&hdev->pdev->dev,
			 "Deassert the soft reset fail, ret = %d\n", ret);
}
static u32 hclge_get_fw_version(struct hnae3_handle *handle)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	return hdev->fw_version;
}
static void hclge_set_flowctrl_adv(struct hclge_dev *hdev, u32 rx_en, u32 tx_en)
{
	struct phy_device *phydev = hdev->hw.mac.phydev;

	if (!phydev)
		return;

	phydev->advertising &= ~(ADVERTISED_Pause | ADVERTISED_Asym_Pause);

	if (rx_en)
		phydev->advertising |= ADVERTISED_Pause | ADVERTISED_Asym_Pause;

	if (tx_en)
		phydev->advertising ^= ADVERTISED_Asym_Pause;
}
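/* The bit twiddling above yields the standard 802.3x advertisement
 * table: rx_en && tx_en -> Pause only (the XOR clears Asym_Pause
 * again); rx_en alone -> Pause | Asym_Pause; tx_en alone ->
 * Asym_Pause; neither -> nothing advertised.
 */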
static int hclge_cfg_pauseparam(struct hclge_dev *hdev, u32 rx_en, u32 tx_en)
{
	int ret;

	if (rx_en && tx_en)
		hdev->fc_mode_last_time = HCLGE_FC_FULL;
	else if (rx_en && !tx_en)
		hdev->fc_mode_last_time = HCLGE_FC_RX_PAUSE;
	else if (!rx_en && tx_en)
		hdev->fc_mode_last_time = HCLGE_FC_TX_PAUSE;
	else
		hdev->fc_mode_last_time = HCLGE_FC_NONE;

	if (hdev->tm_info.fc_mode == HCLGE_FC_PFC)
		return 0;

	ret = hclge_mac_pause_en_cfg(hdev, tx_en, rx_en);
	if (ret) {
		dev_err(&hdev->pdev->dev, "configure pauseparam error, ret = %d.\n",
			ret);
		return ret;
	}

	hdev->tm_info.fc_mode = hdev->fc_mode_last_time;

	return 0;
}
int hclge_cfg_flowctrl(struct hclge_dev *hdev)
{
	struct phy_device *phydev = hdev->hw.mac.phydev;
	u16 remote_advertising = 0;
	u16 local_advertising = 0;
	u32 rx_pause, tx_pause;
	u8 flowctl;

	if (!phydev->link || !phydev->autoneg)
		return 0;

	if (phydev->advertising & ADVERTISED_Pause)
		local_advertising = ADVERTISE_PAUSE_CAP;

	if (phydev->advertising & ADVERTISED_Asym_Pause)
		local_advertising |= ADVERTISE_PAUSE_ASYM;

	if (phydev->pause)
		remote_advertising = LPA_PAUSE_CAP;

	if (phydev->asym_pause)
		remote_advertising |= LPA_PAUSE_ASYM;

	flowctl = mii_resolve_flowctrl_fdx(local_advertising,
					   remote_advertising);
	tx_pause = flowctl & FLOW_CTRL_TX;
	rx_pause = flowctl & FLOW_CTRL_RX;

	if (phydev->duplex == HCLGE_MAC_HALF) {
		tx_pause = 0;
		rx_pause = 0;
	}

	return hclge_cfg_pauseparam(hdev, rx_pause, tx_pause);
}
static void hclge_get_pauseparam(struct hnae3_handle *handle, u32 *auto_neg,
				 u32 *rx_en, u32 *tx_en)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	*auto_neg = hclge_get_autoneg(handle);

	if (hdev->tm_info.fc_mode == HCLGE_FC_PFC) {
		*rx_en = 0;
		*tx_en = 0;
		return;
	}

	if (hdev->tm_info.fc_mode == HCLGE_FC_RX_PAUSE) {
		*rx_en = 1;
		*tx_en = 0;
	} else if (hdev->tm_info.fc_mode == HCLGE_FC_TX_PAUSE) {
		*tx_en = 1;
		*rx_en = 0;
	} else if (hdev->tm_info.fc_mode == HCLGE_FC_FULL) {
		*rx_en = 1;
		*tx_en = 1;
	} else {
		*rx_en = 0;
		*tx_en = 0;
	}
}
static int hclge_set_pauseparam(struct hnae3_handle *handle, u32 auto_neg,
				u32 rx_en, u32 tx_en)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	struct phy_device *phydev = hdev->hw.mac.phydev;
	u32 fc_autoneg;

	fc_autoneg = hclge_get_autoneg(handle);
	if (auto_neg != fc_autoneg) {
		dev_info(&hdev->pdev->dev,
			 "To change autoneg please use: ethtool -s <dev> autoneg <on|off>\n");
		return -EOPNOTSUPP;
	}

	if (hdev->tm_info.fc_mode == HCLGE_FC_PFC) {
		dev_info(&hdev->pdev->dev,
			 "Priority flow control enabled. Cannot set link flow control.\n");
		return -EOPNOTSUPP;
	}

	hclge_set_flowctrl_adv(hdev, rx_en, tx_en);

	if (!fc_autoneg)
		return hclge_cfg_pauseparam(hdev, rx_en, tx_en);

	/* Only support flow control negotiation for netdev with
	 * phy attached for now.
	 */
	if (!phydev)
		return -EOPNOTSUPP;

	return phy_start_aneg(phydev);
}
static void hclge_get_ksettings_an_result(struct hnae3_handle *handle,
					  u8 *auto_neg, u32 *speed, u8 *duplex)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	if (speed)
		*speed = hdev->hw.mac.speed;
	if (duplex)
		*duplex = hdev->hw.mac.duplex;
	if (auto_neg)
		*auto_neg = hdev->hw.mac.autoneg;
}
static void hclge_get_media_type(struct hnae3_handle *handle, u8 *media_type)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	if (media_type)
		*media_type = hdev->hw.mac.media_type;
}
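/* hclge_get_mdix_mode() below has to switch the PHY to its MDI/X
 * register page, read the control/status registers, and then restore
 * the copper page so normal PHY accesses keep working.
 */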
static void hclge_get_mdix_mode(struct hnae3_handle *handle,
				u8 *tp_mdix_ctrl, u8 *tp_mdix)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	struct phy_device *phydev = hdev->hw.mac.phydev;
	int mdix_ctrl, mdix, retval, is_resolved;

	if (!phydev) {
		*tp_mdix_ctrl = ETH_TP_MDI_INVALID;
		*tp_mdix = ETH_TP_MDI_INVALID;
		return;
	}

	phy_write(phydev, HCLGE_PHY_PAGE_REG, HCLGE_PHY_PAGE_MDIX);

	retval = phy_read(phydev, HCLGE_PHY_CSC_REG);
	mdix_ctrl = hnae3_get_field(retval, HCLGE_PHY_MDIX_CTRL_M,
				    HCLGE_PHY_MDIX_CTRL_S);

	retval = phy_read(phydev, HCLGE_PHY_CSS_REG);
	mdix = hnae3_get_bit(retval, HCLGE_PHY_MDIX_STATUS_B);
	is_resolved = hnae3_get_bit(retval, HCLGE_PHY_SPEED_DUP_RESOLVE_B);

	phy_write(phydev, HCLGE_PHY_PAGE_REG, HCLGE_PHY_PAGE_COPPER);

	switch (mdix_ctrl) {
	case 0x0:
		*tp_mdix_ctrl = ETH_TP_MDI;
		break;
	case 0x1:
		*tp_mdix_ctrl = ETH_TP_MDI_X;
		break;
	case 0x3:
		*tp_mdix_ctrl = ETH_TP_MDI_AUTO;
		break;
	default:
		*tp_mdix_ctrl = ETH_TP_MDI_INVALID;
		break;
	}

	if (!is_resolved)
		*tp_mdix = ETH_TP_MDI_INVALID;
	else if (mdix)
		*tp_mdix = ETH_TP_MDI_X;
	else
		*tp_mdix = ETH_TP_MDI;
}
static int hclge_init_instance_hw(struct hclge_dev *hdev)
{
	return hclge_mac_connect_phy(hdev);
}

static void hclge_uninit_instance_hw(struct hclge_dev *hdev)
{
	hclge_mac_disconnect_phy(hdev);
}
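/* Client instances: the HNAE3 framework can attach a KNIC (kernel NIC),
 * UNIC or RoCE client to each vport. A RoCE instance is only brought up
 * once both the NIC client and RoCE support are present, which is why
 * the KNIC and ROCE cases below cross-check each other.
 */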
static int hclge_init_client_instance(struct hnae3_client *client,
				      struct hnae3_ae_dev *ae_dev)
{
	struct hclge_dev *hdev = ae_dev->priv;
	struct hclge_vport *vport;
	int i, ret;

	for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
		vport = &hdev->vport[i];

		switch (client->type) {
		case HNAE3_CLIENT_KNIC:
			hdev->nic_client = client;
			vport->nic.client = client;
			ret = client->ops->init_instance(&vport->nic);
			if (ret)
				goto clear_nic;

			ret = hclge_init_instance_hw(hdev);
			if (ret) {
				client->ops->uninit_instance(&vport->nic,
							     0);
				goto clear_nic;
			}

			hnae3_set_client_init_flag(client, ae_dev, 1);

			if (hdev->roce_client &&
			    hnae3_dev_roce_supported(hdev)) {
				struct hnae3_client *rc = hdev->roce_client;

				ret = hclge_init_roce_base_info(vport);
				if (ret)
					goto clear_roce;

				ret = rc->ops->init_instance(&vport->roce);
				if (ret)
					goto clear_roce;

				hnae3_set_client_init_flag(hdev->roce_client,
							   ae_dev, 1);
			}

			break;
		case HNAE3_CLIENT_UNIC:
			hdev->nic_client = client;
			vport->nic.client = client;

			ret = client->ops->init_instance(&vport->nic);
			if (ret)
				goto clear_nic;

			hnae3_set_client_init_flag(client, ae_dev, 1);

			break;
		case HNAE3_CLIENT_ROCE:
			if (hnae3_dev_roce_supported(hdev)) {
				hdev->roce_client = client;
				vport->roce.client = client;
			}

			if (hdev->roce_client && hdev->nic_client) {
				ret = hclge_init_roce_base_info(vport);
				if (ret)
					goto clear_roce;

				ret = client->ops->init_instance(&vport->roce);
				if (ret)
					goto clear_roce;

				hnae3_set_client_init_flag(client, ae_dev, 1);
			}

			break;
		default:
			return -EINVAL;
		}
	}

	return 0;

clear_nic:
	hdev->nic_client = NULL;
	vport->nic.client = NULL;
	return ret;
clear_roce:
	hdev->roce_client = NULL;
	vport->roce.client = NULL;
	return ret;
}
static void hclge_uninit_client_instance(struct hnae3_client *client,
					 struct hnae3_ae_dev *ae_dev)
{
	struct hclge_dev *hdev = ae_dev->priv;
	struct hclge_vport *vport;
	int i;

	for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
		vport = &hdev->vport[i];
		if (hdev->roce_client) {
			hdev->roce_client->ops->uninit_instance(&vport->roce,
								0);
			hdev->roce_client = NULL;
			vport->roce.client = NULL;
		}
		if (client->type == HNAE3_CLIENT_ROCE)
			return;
		if (hdev->nic_client && client->ops->uninit_instance) {
			hclge_uninit_instance_hw(hdev);
			client->ops->uninit_instance(&vport->nic, 0);
			hdev->nic_client = NULL;
			vport->nic.client = NULL;
		}
	}
}
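/* PCI bring-up below: prefer a 64-bit DMA mask and fall back to
 * 32 bits, then map BAR 2, which the error string identifies as the
 * device's configuration register space.
 */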
static int hclge_pci_init(struct hclge_dev *hdev)
{
	struct pci_dev *pdev = hdev->pdev;
	struct hclge_hw *hw;
	int ret;

	ret = pci_enable_device(pdev);
	if (ret) {
		dev_err(&pdev->dev, "failed to enable PCI device\n");
		return ret;
	}

	ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	if (ret) {
		ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
		if (ret) {
			dev_err(&pdev->dev,
				"can't set consistent PCI DMA");
			goto err_disable_device;
		}
		dev_warn(&pdev->dev, "set DMA mask to 32 bits\n");
	}

	ret = pci_request_regions(pdev, HCLGE_DRIVER_NAME);
	if (ret) {
		dev_err(&pdev->dev, "PCI request regions failed %d\n", ret);
		goto err_disable_device;
	}

	pci_set_master(pdev);
	hw = &hdev->hw;
	hw->io_base = pcim_iomap(pdev, 2, 0);
	if (!hw->io_base) {
		dev_err(&pdev->dev, "Can't map configuration register space\n");
		ret = -ENOMEM;
		goto err_clr_master;
	}

	hdev->num_req_vfs = pci_sriov_get_totalvfs(pdev);

	return 0;

err_clr_master:
	pci_clear_master(pdev);
	pci_release_regions(pdev);
err_disable_device:
	pci_disable_device(pdev);

	return ret;
}
static void hclge_pci_uninit(struct hclge_dev *hdev)
{
	struct pci_dev *pdev = hdev->pdev;

	pcim_iounmap(pdev, hdev->hw.io_base);
	pci_free_irq_vectors(pdev);
	pci_clear_master(pdev);
	pci_release_mem_regions(pdev);
	pci_disable_device(pdev);
}
static void hclge_state_init(struct hclge_dev *hdev)
{
	set_bit(HCLGE_STATE_SERVICE_INITED, &hdev->state);
	set_bit(HCLGE_STATE_DOWN, &hdev->state);
	clear_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state);
	clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
	clear_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state);
	clear_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state);
}
static void hclge_state_uninit(struct hclge_dev *hdev)
{
	set_bit(HCLGE_STATE_DOWN, &hdev->state);

	if (hdev->service_timer.function)
		del_timer_sync(&hdev->service_timer);
	if (hdev->reset_timer.function)
		del_timer_sync(&hdev->reset_timer);
	if (hdev->service_task.func)
		cancel_work_sync(&hdev->service_task);
	if (hdev->rst_service_task.func)
		cancel_work_sync(&hdev->rst_service_task);
	if (hdev->mbx_service_task.func)
		cancel_work_sync(&hdev->mbx_service_task);
}
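/* FLR preparation below polls for HNAE3_FLR_DOWN for at most
 * HCLGE_FLR_WAIT_CNT * HCLGE_FLR_WAIT_MS = 50 * 100 ms = 5 seconds
 * before giving up with an error.
 */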
static void hclge_flr_prepare(struct hnae3_ae_dev *ae_dev)
{
#define HCLGE_FLR_WAIT_MS	100
#define HCLGE_FLR_WAIT_CNT	50
	struct hclge_dev *hdev = ae_dev->priv;
	int cnt = 0;

	clear_bit(HNAE3_FLR_DOWN, &hdev->flr_state);
	clear_bit(HNAE3_FLR_DONE, &hdev->flr_state);
	set_bit(HNAE3_FLR_RESET, &hdev->default_reset_request);
	hclge_reset_event(hdev->pdev, NULL);

	while (!test_bit(HNAE3_FLR_DOWN, &hdev->flr_state) &&
	       cnt++ < HCLGE_FLR_WAIT_CNT)
		msleep(HCLGE_FLR_WAIT_MS);

	if (!test_bit(HNAE3_FLR_DOWN, &hdev->flr_state))
		dev_err(&hdev->pdev->dev,
			"flr wait down timeout: %d\n", cnt);
}
static void hclge_flr_done(struct hnae3_ae_dev *ae_dev)
{
	struct hclge_dev *hdev = ae_dev->priv;

	set_bit(HNAE3_FLR_DONE, &hdev->flr_state);
}
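/* hclge_init_ae_dev() is the main probe path. Each step that can fail
 * jumps to the matching label in the unwind ladder at the bottom, so
 * everything initialized before the failure is torn down in reverse
 * order.
 */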
static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
{
	struct pci_dev *pdev = ae_dev->pdev;
	struct hclge_dev *hdev;
	int ret;

	hdev = devm_kzalloc(&pdev->dev, sizeof(*hdev), GFP_KERNEL);
	if (!hdev) {
		ret = -ENOMEM;
		goto out;
	}

	hdev->pdev = pdev;
	hdev->ae_dev = ae_dev;
	hdev->reset_type = HNAE3_NONE_RESET;
	hdev->reset_level = HNAE3_FUNC_RESET;
	ae_dev->priv = hdev;
	hdev->mps = ETH_FRAME_LEN + ETH_FCS_LEN + 2 * VLAN_HLEN;

	mutex_init(&hdev->vport_lock);

	ret = hclge_pci_init(hdev);
	if (ret) {
		dev_err(&pdev->dev, "PCI init failed\n");
		goto out;
	}

	/* Firmware command queue initialize */
	ret = hclge_cmd_queue_init(hdev);
	if (ret) {
		dev_err(&pdev->dev, "Cmd queue init failed, ret = %d.\n", ret);
		goto err_pci_uninit;
	}

	/* Firmware command initialize */
	ret = hclge_cmd_init(hdev);
	if (ret)
		goto err_cmd_uninit;

	ret = hclge_get_cap(hdev);
	if (ret) {
		dev_err(&pdev->dev, "get hw capability error, ret = %d.\n",
			ret);
		goto err_cmd_uninit;
	}

	ret = hclge_configure(hdev);
	if (ret) {
		dev_err(&pdev->dev, "Configure dev error, ret = %d.\n", ret);
		goto err_cmd_uninit;
	}

	ret = hclge_init_msi(hdev);
	if (ret) {
		dev_err(&pdev->dev, "Init MSI/MSI-X error, ret = %d.\n", ret);
		goto err_cmd_uninit;
	}

	ret = hclge_misc_irq_init(hdev);
	if (ret) {
		dev_err(&pdev->dev,
			"Misc IRQ(vector0) init error, ret = %d.\n",
			ret);
		goto err_msi_uninit;
	}

	ret = hclge_alloc_tqps(hdev);
	if (ret) {
		dev_err(&pdev->dev, "Allocate TQPs error, ret = %d.\n", ret);
		goto err_msi_irq_uninit;
	}

	ret = hclge_alloc_vport(hdev);
	if (ret) {
		dev_err(&pdev->dev, "Allocate vport error, ret = %d.\n", ret);
		goto err_msi_irq_uninit;
	}

	ret = hclge_map_tqp(hdev);
	if (ret) {
		dev_err(&pdev->dev, "Map tqp error, ret = %d.\n", ret);
		goto err_msi_irq_uninit;
	}

	if (hdev->hw.mac.media_type == HNAE3_MEDIA_TYPE_COPPER) {
		ret = hclge_mac_mdio_config(hdev);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"mdio config fail ret=%d\n", ret);
			goto err_msi_irq_uninit;
		}
	}

	ret = hclge_init_umv_space(hdev);
	if (ret) {
		dev_err(&pdev->dev, "umv space init error, ret=%d.\n", ret);
		goto err_msi_irq_uninit;
	}

	ret = hclge_mac_init(hdev);
	if (ret) {
		dev_err(&pdev->dev, "Mac init error, ret = %d\n", ret);
		goto err_mdiobus_unreg;
	}

	ret = hclge_config_tso(hdev, HCLGE_TSO_MSS_MIN, HCLGE_TSO_MSS_MAX);
	if (ret) {
		dev_err(&pdev->dev, "Enable tso fail, ret =%d\n", ret);
		goto err_mdiobus_unreg;
	}

	ret = hclge_config_gro(hdev, true);
	if (ret)
		goto err_mdiobus_unreg;

	ret = hclge_init_vlan_config(hdev);
	if (ret) {
		dev_err(&pdev->dev, "VLAN init fail, ret =%d\n", ret);
		goto err_mdiobus_unreg;
	}

	ret = hclge_tm_schd_init(hdev);
	if (ret) {
		dev_err(&pdev->dev, "tm schd init fail, ret =%d\n", ret);
		goto err_mdiobus_unreg;
	}

	hclge_rss_init_cfg(hdev);
	ret = hclge_rss_init_hw(hdev);
	if (ret) {
		dev_err(&pdev->dev, "Rss init fail, ret =%d\n", ret);
		goto err_mdiobus_unreg;
	}

	ret = init_mgr_tbl(hdev);
	if (ret) {
		dev_err(&pdev->dev, "manager table init fail, ret =%d\n", ret);
		goto err_mdiobus_unreg;
	}

	ret = hclge_init_fd_config(hdev);
	if (ret) {
		dev_err(&pdev->dev,
			"fd table init fail, ret=%d\n", ret);
		goto err_mdiobus_unreg;
	}

	ret = hclge_hw_error_set_state(hdev, true);
	if (ret) {
		dev_err(&pdev->dev,
			"fail(%d) to enable hw error interrupts\n", ret);
		goto err_mdiobus_unreg;
	}

	hclge_dcb_ops_set(hdev);

	timer_setup(&hdev->service_timer, hclge_service_timer, 0);
	timer_setup(&hdev->reset_timer, hclge_reset_timer, 0);
	INIT_WORK(&hdev->service_task, hclge_service_task);
	INIT_WORK(&hdev->rst_service_task, hclge_reset_service_task);
	INIT_WORK(&hdev->mbx_service_task, hclge_mailbox_service_task);

	hclge_clear_all_event_cause(hdev);

	/* Enable MISC vector(vector0) */
	hclge_enable_vector(&hdev->misc_vector, true);

	hclge_state_init(hdev);
	hdev->last_reset_time = jiffies;

	pr_info("%s driver initialization finished.\n", HCLGE_DRIVER_NAME);
	return 0;

err_mdiobus_unreg:
	if (hdev->hw.mac.phydev)
		mdiobus_unregister(hdev->hw.mac.mdio_bus);
err_msi_irq_uninit:
	hclge_misc_irq_uninit(hdev);
err_msi_uninit:
	pci_free_irq_vectors(pdev);
err_cmd_uninit:
	hclge_destroy_cmd_queue(&hdev->hw);
err_pci_uninit:
	pcim_iounmap(pdev, hdev->hw.io_base);
	pci_clear_master(pdev);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
out:
	return ret;
}
static void hclge_stats_clear(struct hclge_dev *hdev)
{
	memset(&hdev->hw_stats, 0, sizeof(hdev->hw_stats));
}
static void hclge_reset_vport_state(struct hclge_dev *hdev)
{
	struct hclge_vport *vport = hdev->vport;
	int i;

	for (i = 0; i < hdev->num_alloc_vport; i++) {
		hclge_vport_start(vport);
		vport++;
	}
}
static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev)
{
	struct hclge_dev *hdev = ae_dev->priv;
	struct pci_dev *pdev = ae_dev->pdev;
	int ret;

	set_bit(HCLGE_STATE_DOWN, &hdev->state);

	hclge_stats_clear(hdev);
	memset(hdev->vlan_table, 0, sizeof(hdev->vlan_table));

	ret = hclge_cmd_init(hdev);
	if (ret) {
		dev_err(&pdev->dev, "Cmd queue init failed\n");
		return ret;
	}

	ret = hclge_get_cap(hdev);
	if (ret) {
		dev_err(&pdev->dev, "get hw capability error, ret = %d.\n",
			ret);
		return ret;
	}

	ret = hclge_configure(hdev);
	if (ret) {
		dev_err(&pdev->dev, "Configure dev error, ret = %d.\n", ret);
		return ret;
	}

	ret = hclge_map_tqp(hdev);
	if (ret) {
		dev_err(&pdev->dev, "Map tqp error, ret = %d.\n", ret);
		return ret;
	}

	hclge_reset_umv_space(hdev);

	ret = hclge_mac_init(hdev);
	if (ret) {
		dev_err(&pdev->dev, "Mac init error, ret = %d\n", ret);
		return ret;
	}

	ret = hclge_config_tso(hdev, HCLGE_TSO_MSS_MIN, HCLGE_TSO_MSS_MAX);
	if (ret) {
		dev_err(&pdev->dev, "Enable tso fail, ret =%d\n", ret);
		return ret;
	}

	ret = hclge_config_gro(hdev, true);
	if (ret)
		return ret;

	ret = hclge_init_vlan_config(hdev);
	if (ret) {
		dev_err(&pdev->dev, "VLAN init fail, ret =%d\n", ret);
		return ret;
	}

	ret = hclge_tm_init_hw(hdev);
	if (ret) {
		dev_err(&pdev->dev, "tm init hw fail, ret =%d\n", ret);
		return ret;
	}

	ret = hclge_rss_init_hw(hdev);
	if (ret) {
		dev_err(&pdev->dev, "Rss init fail, ret =%d\n", ret);
		return ret;
	}

	ret = hclge_init_fd_config(hdev);
	if (ret) {
		dev_err(&pdev->dev,
			"fd table init fail, ret=%d\n", ret);
		return ret;
	}

	/* Re-enable the hw error interrupts because
	 * the interrupts get disabled on core/global reset.
	 */
	ret = hclge_hw_error_set_state(hdev, true);
	if (ret) {
		dev_err(&pdev->dev,
			"fail(%d) to re-enable HNS hw error interrupts\n",
			ret);
		return ret;
	}

	hclge_reset_vport_state(hdev);

	dev_info(&pdev->dev, "Reset done, %s driver initialization finished.\n",
		 HCLGE_DRIVER_NAME);

	return 0;
}
static void hclge_uninit_ae_dev(struct hnae3_ae_dev *ae_dev)
{
	struct hclge_dev *hdev = ae_dev->priv;
	struct hclge_mac *mac = &hdev->hw.mac;

	hclge_state_uninit(hdev);

	if (mac->phydev)
		mdiobus_unregister(mac->mdio_bus);

	hclge_uninit_umv_space(hdev);

	/* Disable MISC vector(vector0) */
	hclge_enable_vector(&hdev->misc_vector, false);
	synchronize_irq(hdev->misc_vector.vector_irq);

	hclge_hw_error_set_state(hdev, false);
	hclge_destroy_cmd_queue(&hdev->hw);
	hclge_misc_irq_uninit(hdev);
	hclge_pci_uninit(hdev);
	mutex_destroy(&hdev->vport_lock);
	ae_dev->priv = NULL;
}
static u32 hclge_get_max_channels(struct hnae3_handle *handle)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	return min_t(u32, hdev->rss_size_max * kinfo->num_tc, hdev->num_tqps);
}
static void hclge_get_channels(struct hnae3_handle *handle,
			       struct ethtool_channels *ch)
{
	struct hclge_vport *vport = hclge_get_vport(handle);

	ch->max_combined = hclge_get_max_channels(handle);
	ch->other_count = 1;
	ch->max_other = 1;
	ch->combined_count = vport->alloc_tqps;
}
static void hclge_get_tqps_and_rss_info(struct hnae3_handle *handle,
					u16 *alloc_tqps, u16 *max_rss_size)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	*alloc_tqps = vport->alloc_tqps;
	*max_rss_size = hdev->rss_size_max;
}
static void hclge_release_tqp(struct hclge_vport *vport)
{
	struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
	struct hclge_dev *hdev = vport->back;
	int i;

	for (i = 0; i < kinfo->num_tqps; i++) {
		struct hclge_tqp *tqp =
			container_of(kinfo->tqp[i], struct hclge_tqp, q);

		tqp->q.handle = NULL;
		tqp->q.tqp_index = 0;
		tqp->alloced = false;
	}

	devm_kfree(&hdev->pdev->dev, kinfo->tqp);
	kinfo->tqp = NULL;
}
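/* Changing the channel count below is a full re-plumb: release the old
 * TQPs, set the NIC up again with the new count, remap TQPs to the
 * vport, re-init the TM scheduler, and finally recompute the RSS TC
 * mode and indirection table for the new rss_size.
 */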
static int hclge_set_channels(struct hnae3_handle *handle, u32 new_tqps_num)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
	struct hclge_dev *hdev = vport->back;
	int cur_rss_size = kinfo->rss_size;
	int cur_tqps = kinfo->num_tqps;
	u16 tc_offset[HCLGE_MAX_TC_NUM];
	u16 tc_valid[HCLGE_MAX_TC_NUM];
	u16 tc_size[HCLGE_MAX_TC_NUM];
	u16 roundup_size;
	u32 *rss_indir;
	int ret, i;

	/* Free old tqps, and reallocate with new tqp number when nic setup */
	hclge_release_tqp(vport);

	ret = hclge_knic_setup(vport, new_tqps_num, kinfo->num_desc);
	if (ret) {
		dev_err(&hdev->pdev->dev, "setup nic fail, ret =%d\n", ret);
		return ret;
	}

	ret = hclge_map_tqp_to_vport(hdev, vport);
	if (ret) {
		dev_err(&hdev->pdev->dev, "map vport tqp fail, ret =%d\n", ret);
		return ret;
	}

	ret = hclge_tm_schd_init(hdev);
	if (ret) {
		dev_err(&hdev->pdev->dev, "tm schd init fail, ret =%d\n", ret);
		return ret;
	}

	roundup_size = roundup_pow_of_two(kinfo->rss_size);
	roundup_size = ilog2(roundup_size);
	/* Set the RSS TC mode according to the new RSS size */
	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		tc_valid[i] = 0;

		if (!(hdev->hw_tc_map & BIT(i)))
			continue;

		tc_valid[i] = 1;
		tc_size[i] = roundup_size;
		tc_offset[i] = kinfo->rss_size * i;
	}
	ret = hclge_set_rss_tc_mode(hdev, tc_valid, tc_size, tc_offset);
	if (ret)
		return ret;

	/* Reinitializes the rss indirect table according to the new RSS size */
	rss_indir = kcalloc(HCLGE_RSS_IND_TBL_SIZE, sizeof(u32), GFP_KERNEL);
	if (!rss_indir)
		return -ENOMEM;

	for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
		rss_indir[i] = i % kinfo->rss_size;

	ret = hclge_set_rss(handle, rss_indir, NULL, 0);
	if (ret)
		dev_err(&hdev->pdev->dev, "set rss indir table fail, ret=%d\n",
			ret);

	kfree(rss_indir);

	if (!ret)
		dev_info(&hdev->pdev->dev,
			 "Channels changed, rss_size from %d to %d, tqps from %d to %d",
			 cur_rss_size, kinfo->rss_size,
			 cur_tqps, kinfo->rss_size * kinfo->num_tc);

	return ret;
}
static int hclge_get_regs_num(struct hclge_dev *hdev, u32 *regs_num_32_bit,
			      u32 *regs_num_64_bit)
{
	struct hclge_desc desc;
	u32 total_num;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_REG_NUM, true);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Query register number cmd failed, ret = %d.\n", ret);
		return ret;
	}

	*regs_num_32_bit = le32_to_cpu(desc.data[0]);
	*regs_num_64_bit = le32_to_cpu(desc.data[1]);

	total_num = *regs_num_32_bit + *regs_num_64_bit;
	if (!total_num)
		return -EINVAL;

	return 0;
}
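/* For the register-dump queries below, the first descriptor's data
 * area starts with a two-word (32-bit case) or one-word (64-bit case)
 * command header, which is why descriptor 0 yields DATANUM - 2 or
 * DATANUM - 1 values while later descriptors yield the full DATANUM.
 */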
static int hclge_get_32_bit_regs(struct hclge_dev *hdev, u32 regs_num,
				 void *data)
{
#define HCLGE_32_BIT_REG_RTN_DATANUM 8

	struct hclge_desc *desc;
	u32 *reg_val = data;
	__le32 *desc_data;
	int cmd_num;
	int i, k, n;
	int ret;

	if (regs_num == 0)
		return 0;

	cmd_num = DIV_ROUND_UP(regs_num + 2, HCLGE_32_BIT_REG_RTN_DATANUM);
	desc = kcalloc(cmd_num, sizeof(struct hclge_desc), GFP_KERNEL);
	if (!desc)
		return -ENOMEM;

	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_32_BIT_REG, true);
	ret = hclge_cmd_send(&hdev->hw, desc, cmd_num);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Query 32 bit register cmd failed, ret = %d.\n", ret);
		kfree(desc);
		return ret;
	}

	for (i = 0; i < cmd_num; i++) {
		if (i == 0) {
			desc_data = (__le32 *)(&desc[i].data[0]);
			n = HCLGE_32_BIT_REG_RTN_DATANUM - 2;
		} else {
			desc_data = (__le32 *)(&desc[i]);
			n = HCLGE_32_BIT_REG_RTN_DATANUM;
		}
		for (k = 0; k < n; k++) {
			*reg_val++ = le32_to_cpu(*desc_data++);

			regs_num--;
			if (!regs_num)
				break;
		}
	}

	kfree(desc);
	return 0;
}
static int hclge_get_64_bit_regs(struct hclge_dev *hdev, u32 regs_num,
				 void *data)
{
#define HCLGE_64_BIT_REG_RTN_DATANUM 4

	struct hclge_desc *desc;
	u64 *reg_val = data;
	__le64 *desc_data;
	int cmd_num;
	int i, k, n;
	int ret;

	if (regs_num == 0)
		return 0;

	cmd_num = DIV_ROUND_UP(regs_num + 1, HCLGE_64_BIT_REG_RTN_DATANUM);
	desc = kcalloc(cmd_num, sizeof(struct hclge_desc), GFP_KERNEL);
	if (!desc)
		return -ENOMEM;

	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_64_BIT_REG, true);
	ret = hclge_cmd_send(&hdev->hw, desc, cmd_num);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Query 64 bit register cmd failed, ret = %d.\n", ret);
		kfree(desc);
		return ret;
	}

	for (i = 0; i < cmd_num; i++) {
		if (i == 0) {
			desc_data = (__le64 *)(&desc[i].data[0]);
			n = HCLGE_64_BIT_REG_RTN_DATANUM - 1;
		} else {
			desc_data = (__le64 *)(&desc[i]);
			n = HCLGE_64_BIT_REG_RTN_DATANUM;
		}
		for (k = 0; k < n; k++) {
			*reg_val++ = le64_to_cpu(*desc_data++);

			regs_num--;
			if (!regs_num)
				break;
		}
	}

	kfree(desc);
	return 0;
}
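/* Dump layout used by get_regs_len()/get_regs() below: registers are
 * emitted in lines of REG_NUM_PER_LINE u32 values, each line padded
 * out with SEPARATOR_VALUE words, followed by the raw 32-bit and
 * 64-bit register blocks read from firmware.
 */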
#define MAX_SEPARATE_NUM	4
#define SEPARATOR_VALUE		0xFFFFFFFF
#define REG_NUM_PER_LINE	4
#define REG_LEN_PER_LINE	(REG_NUM_PER_LINE * sizeof(u32))

static int hclge_get_regs_len(struct hnae3_handle *handle)
{
	int cmdq_lines, common_lines, ring_lines, tqp_intr_lines;
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	u32 regs_num_32_bit, regs_num_64_bit;
	int ret;

	ret = hclge_get_regs_num(hdev, &regs_num_32_bit, &regs_num_64_bit);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Get register number failed, ret = %d.\n", ret);
		return -EOPNOTSUPP;
	}

	cmdq_lines = sizeof(cmdq_reg_addr_list) / REG_LEN_PER_LINE + 1;
	common_lines = sizeof(common_reg_addr_list) / REG_LEN_PER_LINE + 1;
	ring_lines = sizeof(ring_reg_addr_list) / REG_LEN_PER_LINE + 1;
	tqp_intr_lines = sizeof(tqp_intr_reg_addr_list) / REG_LEN_PER_LINE + 1;

	return (cmdq_lines + common_lines + ring_lines * kinfo->num_tqps +
		tqp_intr_lines * (hdev->num_msi_used - 1)) * REG_LEN_PER_LINE +
		regs_num_32_bit * sizeof(u32) + regs_num_64_bit * sizeof(u64);
}
static void hclge_get_regs(struct hnae3_handle *handle, u32 *version,
			   void *data)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	u32 regs_num_32_bit, regs_num_64_bit;
	int i, j, reg_um, separator_num;
	u32 *reg = data;
	int ret;

	*version = hdev->fw_version;

	ret = hclge_get_regs_num(hdev, &regs_num_32_bit, &regs_num_64_bit);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Get register number failed, ret = %d.\n", ret);
		return;
	}

	/* fetching per-PF registers values from PF PCIe register space */
	reg_um = sizeof(cmdq_reg_addr_list) / sizeof(u32);
	separator_num = MAX_SEPARATE_NUM - reg_um % REG_NUM_PER_LINE;
	for (i = 0; i < reg_um; i++)
		*reg++ = hclge_read_dev(&hdev->hw, cmdq_reg_addr_list[i]);
	for (i = 0; i < separator_num; i++)
		*reg++ = SEPARATOR_VALUE;

	reg_um = sizeof(common_reg_addr_list) / sizeof(u32);
	separator_num = MAX_SEPARATE_NUM - reg_um % REG_NUM_PER_LINE;
	for (i = 0; i < reg_um; i++)
		*reg++ = hclge_read_dev(&hdev->hw, common_reg_addr_list[i]);
	for (i = 0; i < separator_num; i++)
		*reg++ = SEPARATOR_VALUE;

	reg_um = sizeof(ring_reg_addr_list) / sizeof(u32);
	separator_num = MAX_SEPARATE_NUM - reg_um % REG_NUM_PER_LINE;
	for (j = 0; j < kinfo->num_tqps; j++) {
		for (i = 0; i < reg_um; i++)
			*reg++ = hclge_read_dev(&hdev->hw,
						ring_reg_addr_list[i] +
						0x200 * j);
		for (i = 0; i < separator_num; i++)
			*reg++ = SEPARATOR_VALUE;
	}

	reg_um = sizeof(tqp_intr_reg_addr_list) / sizeof(u32);
	separator_num = MAX_SEPARATE_NUM - reg_um % REG_NUM_PER_LINE;
	for (j = 0; j < hdev->num_msi_used - 1; j++) {
		for (i = 0; i < reg_um; i++)
			*reg++ = hclge_read_dev(&hdev->hw,
						tqp_intr_reg_addr_list[i] +
						4 * j);
		for (i = 0; i < separator_num; i++)
			*reg++ = SEPARATOR_VALUE;
	}

	/* fetching PF common registers values from firmware */
	ret = hclge_get_32_bit_regs(hdev, regs_num_32_bit, reg);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Get 32 bit register failed, ret = %d.\n", ret);
		return;
	}

	reg += regs_num_32_bit;
	ret = hclge_get_64_bit_regs(hdev, regs_num_64_bit, reg);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"Get 64 bit register failed, ret = %d.\n", ret);
}
static int hclge_set_led_status(struct hclge_dev *hdev, u8 locate_led_status)
{
	struct hclge_set_led_state_cmd *req;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_LED_STATUS_CFG, false);

	req = (struct hclge_set_led_state_cmd *)desc.data;
	hnae3_set_field(req->locate_led_config, HCLGE_LED_LOCATE_STATE_M,
			HCLGE_LED_LOCATE_STATE_S, locate_led_status);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"Send set led state cmd error, ret =%d\n", ret);

	return ret;
}
enum hclge_led_status {
	HCLGE_LED_OFF,
	HCLGE_LED_ON,
	HCLGE_LED_NO_CHANGE = 0xFF,
};
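/* hclge_set_led_id() backs ethtool's physical-ID operation
 * ("ethtool -p <dev>"), toggling the locate LED on and off.
 */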
static int hclge_set_led_id(struct hnae3_handle *handle,
			    enum ethtool_phys_id_state status)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	switch (status) {
	case ETHTOOL_ID_ACTIVE:
		return hclge_set_led_status(hdev, HCLGE_LED_ON);
	case ETHTOOL_ID_INACTIVE:
		return hclge_set_led_status(hdev, HCLGE_LED_OFF);
	default:
		return -EINVAL;
	}
}
*handle
,
7897 unsigned long *supported
,
7898 unsigned long *advertising
)
7900 unsigned int size
= BITS_TO_LONGS(__ETHTOOL_LINK_MODE_MASK_NBITS
);
7901 struct hclge_vport
*vport
= hclge_get_vport(handle
);
7902 struct hclge_dev
*hdev
= vport
->back
;
7903 unsigned int idx
= 0;
7905 for (; idx
< size
; idx
++) {
7906 supported
[idx
] = hdev
->hw
.mac
.supported
[idx
];
7907 advertising
[idx
] = hdev
->hw
.mac
.advertising
[idx
];
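/* hclge_ops is the hnae3_ae_ops vtable through which the HNAE3
 * framework drives this PF: lifecycle, MAC/VLAN/RSS configuration,
 * ethtool backends and flow-director hooks all dispatch through it.
 */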
static const struct hnae3_ae_ops hclge_ops = {
	.init_ae_dev = hclge_init_ae_dev,
	.uninit_ae_dev = hclge_uninit_ae_dev,
	.flr_prepare = hclge_flr_prepare,
	.flr_done = hclge_flr_done,
	.init_client_instance = hclge_init_client_instance,
	.uninit_client_instance = hclge_uninit_client_instance,
	.map_ring_to_vector = hclge_map_ring_to_vector,
	.unmap_ring_from_vector = hclge_unmap_ring_frm_vector,
	.get_vector = hclge_get_vector,
	.put_vector = hclge_put_vector,
	.set_promisc_mode = hclge_set_promisc_mode,
	.set_loopback = hclge_set_loopback,
	.start = hclge_ae_start,
	.stop = hclge_ae_stop,
	.client_start = hclge_client_start,
	.client_stop = hclge_client_stop,
	.get_status = hclge_get_status,
	.get_ksettings_an_result = hclge_get_ksettings_an_result,
	.update_speed_duplex_h = hclge_update_speed_duplex_h,
	.cfg_mac_speed_dup_h = hclge_cfg_mac_speed_dup_h,
	.get_media_type = hclge_get_media_type,
	.get_rss_key_size = hclge_get_rss_key_size,
	.get_rss_indir_size = hclge_get_rss_indir_size,
	.get_rss = hclge_get_rss,
	.set_rss = hclge_set_rss,
	.set_rss_tuple = hclge_set_rss_tuple,
	.get_rss_tuple = hclge_get_rss_tuple,
	.get_tc_size = hclge_get_tc_size,
	.get_mac_addr = hclge_get_mac_addr,
	.set_mac_addr = hclge_set_mac_addr,
	.do_ioctl = hclge_do_ioctl,
	.add_uc_addr = hclge_add_uc_addr,
	.rm_uc_addr = hclge_rm_uc_addr,
	.add_mc_addr = hclge_add_mc_addr,
	.rm_mc_addr = hclge_rm_mc_addr,
	.set_autoneg = hclge_set_autoneg,
	.get_autoneg = hclge_get_autoneg,
	.get_pauseparam = hclge_get_pauseparam,
	.set_pauseparam = hclge_set_pauseparam,
	.set_mtu = hclge_set_mtu,
	.reset_queue = hclge_reset_tqp,
	.get_stats = hclge_get_stats,
	.update_stats = hclge_update_stats,
	.get_strings = hclge_get_strings,
	.get_sset_count = hclge_get_sset_count,
	.get_fw_version = hclge_get_fw_version,
	.get_mdix_mode = hclge_get_mdix_mode,
	.enable_vlan_filter = hclge_enable_vlan_filter,
	.set_vlan_filter = hclge_set_vlan_filter,
	.set_vf_vlan_filter = hclge_set_vf_vlan_filter,
	.enable_hw_strip_rxvtag = hclge_en_hw_strip_rxvtag,
	.reset_event = hclge_reset_event,
	.set_default_reset_request = hclge_set_def_reset_request,
	.get_tqps_and_rss_info = hclge_get_tqps_and_rss_info,
	.set_channels = hclge_set_channels,
	.get_channels = hclge_get_channels,
	.get_regs_len = hclge_get_regs_len,
	.get_regs = hclge_get_regs,
	.set_led_id = hclge_set_led_id,
	.get_link_mode = hclge_get_link_mode,
	.add_fd_entry = hclge_add_fd_entry,
	.del_fd_entry = hclge_del_fd_entry,
	.del_all_fd_entries = hclge_del_all_fd_entries,
	.get_fd_rule_cnt = hclge_get_fd_rule_cnt,
	.get_fd_rule_info = hclge_get_fd_rule_info,
	.get_fd_all_rules = hclge_get_all_rules,
	.restore_fd_rules = hclge_restore_fd_entries,
	.enable_fd = hclge_enable_fd,
	.dbg_run_cmd = hclge_dbg_run_cmd,
	.handle_hw_ras_error = hclge_handle_hw_ras_error,
	.get_hw_reset_stat = hclge_get_hw_reset_stat,
	.ae_dev_resetting = hclge_ae_dev_resetting,
	.ae_dev_reset_cnt = hclge_ae_dev_reset_cnt,
};
static struct hnae3_ae_algo ae_algo = {
	.ops = &hclge_ops,
	.pdev_id_table = ae_algo_pci_tbl,
};

static int hclge_init(void)
{
	pr_info("%s is initializing\n", HCLGE_NAME);

	hnae3_register_ae_algo(&ae_algo);

	return 0;
}

static void hclge_exit(void)
{
	hnae3_unregister_ae_algo(&ae_algo);
}
module_init(hclge_init);
module_exit(hclge_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Huawei Tech. Co., Ltd.");
MODULE_DESCRIPTION("HCLGE Driver");
MODULE_VERSION(HCLGE_MOD_VERSION);