]>
Commit | Line | Data |
---|---|---|
ef57c40f JS |
1 | // SPDX-License-Identifier: GPL-2.0+ |
2 | // Copyright (c) 2016-2017 Hisilicon Limited. | |
46a3df9f S |
3 | |
4 | #include <linux/acpi.h> | |
5 | #include <linux/device.h> | |
6 | #include <linux/etherdevice.h> | |
7 | #include <linux/init.h> | |
8 | #include <linux/interrupt.h> | |
9 | #include <linux/kernel.h> | |
10 | #include <linux/module.h> | |
11 | #include <linux/netdevice.h> | |
12 | #include <linux/pci.h> | |
13 | #include <linux/platform_device.h> | |
7393ed39 | 14 | #include <linux/if_vlan.h> |
d5752031 | 15 | #include <net/rtnetlink.h> |
46a3df9f | 16 | #include "hclge_cmd.h" |
cacde272 | 17 | #include "hclge_dcb.h" |
46a3df9f | 18 | #include "hclge_main.h" |
0cdbdd3e | 19 | #include "hclge_mbx.h" |
46a3df9f S |
20 | #include "hclge_mdio.h" |
21 | #include "hclge_tm.h" | |
00bb612a | 22 | #include "hclge_err.h" |
46a3df9f S |
23 | #include "hnae3.h" |
24 | ||
25 | #define HCLGE_NAME "hclge" | |
26 | #define HCLGE_STATS_READ(p, offset) (*((u64 *)((u8 *)(p) + (offset)))) | |
27 | #define HCLGE_MAC_STATS_FIELD_OFF(f) (offsetof(struct hclge_mac_stats, f)) | |
46a3df9f | 28 | |
89b4e1bb YL |
29 | #define HCLGE_BUF_SIZE_UNIT 256 |
30 | ||
4ee09281 | 31 | static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mps); |
46a3df9f | 32 | static int hclge_init_vlan_config(struct hclge_dev *hdev); |
4ed340ab | 33 | static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev); |
2da5ec58 JS |
34 | static int hclge_set_umv_space(struct hclge_dev *hdev, u16 space_size, |
35 | u16 *allocated_size, bool is_alloc); | |
46a3df9f S |
36 | |
37 | static struct hnae3_ae_algo ae_algo; | |
38 | ||
39 | static const struct pci_device_id ae_algo_pci_tbl[] = { | |
40 | {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_GE), 0}, | |
41 | {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE), 0}, | |
42 | {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA), 0}, | |
43 | {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA_MACSEC), 0}, | |
44 | {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA), 0}, | |
45 | {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA_MACSEC), 0}, | |
46 | {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_MACSEC), 0}, | |
e92a0843 | 47 | /* required last entry */ |
46a3df9f S |
48 | {0, } |
49 | }; | |
50 | ||
28d9cec8 YL |
51 | MODULE_DEVICE_TABLE(pci, ae_algo_pci_tbl); |
52 | ||
a1018e31 JS |
53 | static const u32 cmdq_reg_addr_list[] = {HCLGE_CMDQ_TX_ADDR_L_REG, |
54 | HCLGE_CMDQ_TX_ADDR_H_REG, | |
55 | HCLGE_CMDQ_TX_DEPTH_REG, | |
56 | HCLGE_CMDQ_TX_TAIL_REG, | |
57 | HCLGE_CMDQ_TX_HEAD_REG, | |
58 | HCLGE_CMDQ_RX_ADDR_L_REG, | |
59 | HCLGE_CMDQ_RX_ADDR_H_REG, | |
60 | HCLGE_CMDQ_RX_DEPTH_REG, | |
61 | HCLGE_CMDQ_RX_TAIL_REG, | |
62 | HCLGE_CMDQ_RX_HEAD_REG, | |
63 | HCLGE_VECTOR0_CMDQ_SRC_REG, | |
64 | HCLGE_CMDQ_INTR_STS_REG, | |
65 | HCLGE_CMDQ_INTR_EN_REG, | |
66 | HCLGE_CMDQ_INTR_GEN_REG}; | |
67 | ||
68 | static const u32 common_reg_addr_list[] = {HCLGE_MISC_VECTOR_REG_BASE, | |
69 | HCLGE_VECTOR0_OTER_EN_REG, | |
70 | HCLGE_MISC_RESET_STS_REG, | |
71 | HCLGE_MISC_VECTOR_INT_STS, | |
72 | HCLGE_GLOBAL_RESET_REG, | |
73 | HCLGE_FUN_RST_ING, | |
74 | HCLGE_GRO_EN_REG}; | |
75 | ||
76 | static const u32 ring_reg_addr_list[] = {HCLGE_RING_RX_ADDR_L_REG, | |
77 | HCLGE_RING_RX_ADDR_H_REG, | |
78 | HCLGE_RING_RX_BD_NUM_REG, | |
79 | HCLGE_RING_RX_BD_LENGTH_REG, | |
80 | HCLGE_RING_RX_MERGE_EN_REG, | |
81 | HCLGE_RING_RX_TAIL_REG, | |
82 | HCLGE_RING_RX_HEAD_REG, | |
83 | HCLGE_RING_RX_FBD_NUM_REG, | |
84 | HCLGE_RING_RX_OFFSET_REG, | |
85 | HCLGE_RING_RX_FBD_OFFSET_REG, | |
86 | HCLGE_RING_RX_STASH_REG, | |
87 | HCLGE_RING_RX_BD_ERR_REG, | |
88 | HCLGE_RING_TX_ADDR_L_REG, | |
89 | HCLGE_RING_TX_ADDR_H_REG, | |
90 | HCLGE_RING_TX_BD_NUM_REG, | |
91 | HCLGE_RING_TX_PRIORITY_REG, | |
92 | HCLGE_RING_TX_TC_REG, | |
93 | HCLGE_RING_TX_MERGE_EN_REG, | |
94 | HCLGE_RING_TX_TAIL_REG, | |
95 | HCLGE_RING_TX_HEAD_REG, | |
96 | HCLGE_RING_TX_FBD_NUM_REG, | |
97 | HCLGE_RING_TX_OFFSET_REG, | |
98 | HCLGE_RING_TX_EBD_NUM_REG, | |
99 | HCLGE_RING_TX_EBD_OFFSET_REG, | |
100 | HCLGE_RING_TX_BD_ERR_REG, | |
101 | HCLGE_RING_EN_REG}; | |
102 | ||
103 | static const u32 tqp_intr_reg_addr_list[] = {HCLGE_TQP_INTR_CTRL_REG, | |
104 | HCLGE_TQP_INTR_GL0_REG, | |
105 | HCLGE_TQP_INTR_GL1_REG, | |
106 | HCLGE_TQP_INTR_GL2_REG, | |
107 | HCLGE_TQP_INTR_RL_REG}; | |
108 | ||
46a3df9f | 109 | static const char hns3_nic_test_strs[][ETH_GSTRING_LEN] = { |
67b8c316 | 110 | "App Loopback test", |
86957272 FL |
111 | "Serdes serial Loopback test", |
112 | "Serdes parallel Loopback test", | |
46a3df9f S |
113 | "Phy Loopback test" |
114 | }; | |
115 | ||
46a3df9f S |
116 | static const struct hclge_comm_stats_str g_mac_stats_string[] = { |
117 | {"mac_tx_mac_pause_num", | |
118 | HCLGE_MAC_STATS_FIELD_OFF(mac_tx_mac_pause_num)}, | |
119 | {"mac_rx_mac_pause_num", | |
120 | HCLGE_MAC_STATS_FIELD_OFF(mac_rx_mac_pause_num)}, | |
121 | {"mac_tx_pfc_pri0_pkt_num", | |
122 | HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri0_pkt_num)}, | |
123 | {"mac_tx_pfc_pri1_pkt_num", | |
124 | HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri1_pkt_num)}, | |
125 | {"mac_tx_pfc_pri2_pkt_num", | |
126 | HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri2_pkt_num)}, | |
127 | {"mac_tx_pfc_pri3_pkt_num", | |
128 | HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri3_pkt_num)}, | |
129 | {"mac_tx_pfc_pri4_pkt_num", | |
130 | HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri4_pkt_num)}, | |
131 | {"mac_tx_pfc_pri5_pkt_num", | |
132 | HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri5_pkt_num)}, | |
133 | {"mac_tx_pfc_pri6_pkt_num", | |
134 | HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri6_pkt_num)}, | |
135 | {"mac_tx_pfc_pri7_pkt_num", | |
136 | HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri7_pkt_num)}, | |
137 | {"mac_rx_pfc_pri0_pkt_num", | |
138 | HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri0_pkt_num)}, | |
139 | {"mac_rx_pfc_pri1_pkt_num", | |
140 | HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri1_pkt_num)}, | |
141 | {"mac_rx_pfc_pri2_pkt_num", | |
142 | HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri2_pkt_num)}, | |
143 | {"mac_rx_pfc_pri3_pkt_num", | |
144 | HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri3_pkt_num)}, | |
145 | {"mac_rx_pfc_pri4_pkt_num", | |
146 | HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri4_pkt_num)}, | |
147 | {"mac_rx_pfc_pri5_pkt_num", | |
148 | HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri5_pkt_num)}, | |
149 | {"mac_rx_pfc_pri6_pkt_num", | |
150 | HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri6_pkt_num)}, | |
151 | {"mac_rx_pfc_pri7_pkt_num", | |
152 | HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri7_pkt_num)}, | |
153 | {"mac_tx_total_pkt_num", | |
154 | HCLGE_MAC_STATS_FIELD_OFF(mac_tx_total_pkt_num)}, | |
155 | {"mac_tx_total_oct_num", | |
156 | HCLGE_MAC_STATS_FIELD_OFF(mac_tx_total_oct_num)}, | |
157 | {"mac_tx_good_pkt_num", | |
158 | HCLGE_MAC_STATS_FIELD_OFF(mac_tx_good_pkt_num)}, | |
159 | {"mac_tx_bad_pkt_num", | |
160 | HCLGE_MAC_STATS_FIELD_OFF(mac_tx_bad_pkt_num)}, | |
161 | {"mac_tx_good_oct_num", | |
162 | HCLGE_MAC_STATS_FIELD_OFF(mac_tx_good_oct_num)}, | |
163 | {"mac_tx_bad_oct_num", | |
164 | HCLGE_MAC_STATS_FIELD_OFF(mac_tx_bad_oct_num)}, | |
165 | {"mac_tx_uni_pkt_num", | |
166 | HCLGE_MAC_STATS_FIELD_OFF(mac_tx_uni_pkt_num)}, | |
167 | {"mac_tx_multi_pkt_num", | |
168 | HCLGE_MAC_STATS_FIELD_OFF(mac_tx_multi_pkt_num)}, | |
169 | {"mac_tx_broad_pkt_num", | |
170 | HCLGE_MAC_STATS_FIELD_OFF(mac_tx_broad_pkt_num)}, | |
171 | {"mac_tx_undersize_pkt_num", | |
172 | HCLGE_MAC_STATS_FIELD_OFF(mac_tx_undersize_pkt_num)}, | |
f3426583 JS |
173 | {"mac_tx_oversize_pkt_num", |
174 | HCLGE_MAC_STATS_FIELD_OFF(mac_tx_oversize_pkt_num)}, | |
46a3df9f S |
175 | {"mac_tx_64_oct_pkt_num", |
176 | HCLGE_MAC_STATS_FIELD_OFF(mac_tx_64_oct_pkt_num)}, | |
177 | {"mac_tx_65_127_oct_pkt_num", | |
178 | HCLGE_MAC_STATS_FIELD_OFF(mac_tx_65_127_oct_pkt_num)}, | |
179 | {"mac_tx_128_255_oct_pkt_num", | |
180 | HCLGE_MAC_STATS_FIELD_OFF(mac_tx_128_255_oct_pkt_num)}, | |
181 | {"mac_tx_256_511_oct_pkt_num", | |
182 | HCLGE_MAC_STATS_FIELD_OFF(mac_tx_256_511_oct_pkt_num)}, | |
183 | {"mac_tx_512_1023_oct_pkt_num", | |
184 | HCLGE_MAC_STATS_FIELD_OFF(mac_tx_512_1023_oct_pkt_num)}, | |
185 | {"mac_tx_1024_1518_oct_pkt_num", | |
186 | HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1024_1518_oct_pkt_num)}, | |
b42874e4 JS |
187 | {"mac_tx_1519_2047_oct_pkt_num", |
188 | HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_2047_oct_pkt_num)}, | |
189 | {"mac_tx_2048_4095_oct_pkt_num", | |
190 | HCLGE_MAC_STATS_FIELD_OFF(mac_tx_2048_4095_oct_pkt_num)}, | |
191 | {"mac_tx_4096_8191_oct_pkt_num", | |
192 | HCLGE_MAC_STATS_FIELD_OFF(mac_tx_4096_8191_oct_pkt_num)}, | |
b42874e4 JS |
193 | {"mac_tx_8192_9216_oct_pkt_num", |
194 | HCLGE_MAC_STATS_FIELD_OFF(mac_tx_8192_9216_oct_pkt_num)}, | |
195 | {"mac_tx_9217_12287_oct_pkt_num", | |
196 | HCLGE_MAC_STATS_FIELD_OFF(mac_tx_9217_12287_oct_pkt_num)}, | |
197 | {"mac_tx_12288_16383_oct_pkt_num", | |
198 | HCLGE_MAC_STATS_FIELD_OFF(mac_tx_12288_16383_oct_pkt_num)}, | |
199 | {"mac_tx_1519_max_good_pkt_num", | |
200 | HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_max_good_oct_pkt_num)}, | |
201 | {"mac_tx_1519_max_bad_pkt_num", | |
202 | HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_max_bad_oct_pkt_num)}, | |
46a3df9f S |
203 | {"mac_rx_total_pkt_num", |
204 | HCLGE_MAC_STATS_FIELD_OFF(mac_rx_total_pkt_num)}, | |
205 | {"mac_rx_total_oct_num", | |
206 | HCLGE_MAC_STATS_FIELD_OFF(mac_rx_total_oct_num)}, | |
207 | {"mac_rx_good_pkt_num", | |
208 | HCLGE_MAC_STATS_FIELD_OFF(mac_rx_good_pkt_num)}, | |
209 | {"mac_rx_bad_pkt_num", | |
210 | HCLGE_MAC_STATS_FIELD_OFF(mac_rx_bad_pkt_num)}, | |
211 | {"mac_rx_good_oct_num", | |
212 | HCLGE_MAC_STATS_FIELD_OFF(mac_rx_good_oct_num)}, | |
213 | {"mac_rx_bad_oct_num", | |
214 | HCLGE_MAC_STATS_FIELD_OFF(mac_rx_bad_oct_num)}, | |
215 | {"mac_rx_uni_pkt_num", | |
216 | HCLGE_MAC_STATS_FIELD_OFF(mac_rx_uni_pkt_num)}, | |
217 | {"mac_rx_multi_pkt_num", | |
218 | HCLGE_MAC_STATS_FIELD_OFF(mac_rx_multi_pkt_num)}, | |
219 | {"mac_rx_broad_pkt_num", | |
220 | HCLGE_MAC_STATS_FIELD_OFF(mac_rx_broad_pkt_num)}, | |
221 | {"mac_rx_undersize_pkt_num", | |
222 | HCLGE_MAC_STATS_FIELD_OFF(mac_rx_undersize_pkt_num)}, | |
f3426583 JS |
223 | {"mac_rx_oversize_pkt_num", |
224 | HCLGE_MAC_STATS_FIELD_OFF(mac_rx_oversize_pkt_num)}, | |
46a3df9f S |
225 | {"mac_rx_64_oct_pkt_num", |
226 | HCLGE_MAC_STATS_FIELD_OFF(mac_rx_64_oct_pkt_num)}, | |
227 | {"mac_rx_65_127_oct_pkt_num", | |
228 | HCLGE_MAC_STATS_FIELD_OFF(mac_rx_65_127_oct_pkt_num)}, | |
229 | {"mac_rx_128_255_oct_pkt_num", | |
230 | HCLGE_MAC_STATS_FIELD_OFF(mac_rx_128_255_oct_pkt_num)}, | |
231 | {"mac_rx_256_511_oct_pkt_num", | |
232 | HCLGE_MAC_STATS_FIELD_OFF(mac_rx_256_511_oct_pkt_num)}, | |
233 | {"mac_rx_512_1023_oct_pkt_num", | |
234 | HCLGE_MAC_STATS_FIELD_OFF(mac_rx_512_1023_oct_pkt_num)}, | |
235 | {"mac_rx_1024_1518_oct_pkt_num", | |
236 | HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1024_1518_oct_pkt_num)}, | |
b42874e4 JS |
237 | {"mac_rx_1519_2047_oct_pkt_num", |
238 | HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_2047_oct_pkt_num)}, | |
239 | {"mac_rx_2048_4095_oct_pkt_num", | |
240 | HCLGE_MAC_STATS_FIELD_OFF(mac_rx_2048_4095_oct_pkt_num)}, | |
241 | {"mac_rx_4096_8191_oct_pkt_num", | |
242 | HCLGE_MAC_STATS_FIELD_OFF(mac_rx_4096_8191_oct_pkt_num)}, | |
b42874e4 JS |
243 | {"mac_rx_8192_9216_oct_pkt_num", |
244 | HCLGE_MAC_STATS_FIELD_OFF(mac_rx_8192_9216_oct_pkt_num)}, | |
245 | {"mac_rx_9217_12287_oct_pkt_num", | |
246 | HCLGE_MAC_STATS_FIELD_OFF(mac_rx_9217_12287_oct_pkt_num)}, | |
247 | {"mac_rx_12288_16383_oct_pkt_num", | |
248 | HCLGE_MAC_STATS_FIELD_OFF(mac_rx_12288_16383_oct_pkt_num)}, | |
249 | {"mac_rx_1519_max_good_pkt_num", | |
250 | HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_max_good_oct_pkt_num)}, | |
251 | {"mac_rx_1519_max_bad_pkt_num", | |
252 | HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_max_bad_oct_pkt_num)}, | |
46a3df9f | 253 | |
c36317be JS |
254 | {"mac_tx_fragment_pkt_num", |
255 | HCLGE_MAC_STATS_FIELD_OFF(mac_tx_fragment_pkt_num)}, | |
256 | {"mac_tx_undermin_pkt_num", | |
257 | HCLGE_MAC_STATS_FIELD_OFF(mac_tx_undermin_pkt_num)}, | |
258 | {"mac_tx_jabber_pkt_num", | |
259 | HCLGE_MAC_STATS_FIELD_OFF(mac_tx_jabber_pkt_num)}, | |
260 | {"mac_tx_err_all_pkt_num", | |
261 | HCLGE_MAC_STATS_FIELD_OFF(mac_tx_err_all_pkt_num)}, | |
262 | {"mac_tx_from_app_good_pkt_num", | |
263 | HCLGE_MAC_STATS_FIELD_OFF(mac_tx_from_app_good_pkt_num)}, | |
264 | {"mac_tx_from_app_bad_pkt_num", | |
265 | HCLGE_MAC_STATS_FIELD_OFF(mac_tx_from_app_bad_pkt_num)}, | |
266 | {"mac_rx_fragment_pkt_num", | |
267 | HCLGE_MAC_STATS_FIELD_OFF(mac_rx_fragment_pkt_num)}, | |
268 | {"mac_rx_undermin_pkt_num", | |
269 | HCLGE_MAC_STATS_FIELD_OFF(mac_rx_undermin_pkt_num)}, | |
270 | {"mac_rx_jabber_pkt_num", | |
271 | HCLGE_MAC_STATS_FIELD_OFF(mac_rx_jabber_pkt_num)}, | |
272 | {"mac_rx_fcs_err_pkt_num", | |
273 | HCLGE_MAC_STATS_FIELD_OFF(mac_rx_fcs_err_pkt_num)}, | |
274 | {"mac_rx_send_app_good_pkt_num", | |
275 | HCLGE_MAC_STATS_FIELD_OFF(mac_rx_send_app_good_pkt_num)}, | |
276 | {"mac_rx_send_app_bad_pkt_num", | |
277 | HCLGE_MAC_STATS_FIELD_OFF(mac_rx_send_app_bad_pkt_num)} | |
46a3df9f S |
278 | }; |
279 | ||
635bfb58 FL |
280 | static const struct hclge_mac_mgr_tbl_entry_cmd hclge_mgr_table[] = { |
281 | { | |
282 | .flags = HCLGE_MAC_MGR_MASK_VLAN_B, | |
283 | .ethter_type = cpu_to_le16(HCLGE_MAC_ETHERTYPE_LLDP), | |
284 | .mac_addr_hi32 = cpu_to_le32(htonl(0x0180C200)), | |
285 | .mac_addr_lo16 = cpu_to_le16(htons(0x000E)), | |
286 | .i_port_bitmap = 0x1, | |
287 | }, | |
288 | }; | |
289 | ||
46a3df9f S |
290 | static int hclge_mac_update_stats(struct hclge_dev *hdev) |
291 | { | |
b42874e4 | 292 | #define HCLGE_MAC_CMD_NUM 21 |
46a3df9f S |
293 | #define HCLGE_RTN_DATA_NUM 4 |
294 | ||
295 | u64 *data = (u64 *)(&hdev->hw_stats.mac_stats); | |
296 | struct hclge_desc desc[HCLGE_MAC_CMD_NUM]; | |
a90bb9a5 | 297 | __le64 *desc_data; |
46a3df9f S |
298 | int i, k, n; |
299 | int ret; | |
300 | ||
301 | hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_STATS_MAC, true); | |
302 | ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_MAC_CMD_NUM); | |
303 | if (ret) { | |
304 | dev_err(&hdev->pdev->dev, | |
305 | "Get MAC pkt stats fail, status = %d.\n", ret); | |
306 | ||
307 | return ret; | |
308 | } | |
309 | ||
310 | for (i = 0; i < HCLGE_MAC_CMD_NUM; i++) { | |
311 | if (unlikely(i == 0)) { | |
a90bb9a5 | 312 | desc_data = (__le64 *)(&desc[i].data[0]); |
46a3df9f S |
313 | n = HCLGE_RTN_DATA_NUM - 2; |
314 | } else { | |
a90bb9a5 | 315 | desc_data = (__le64 *)(&desc[i]); |
46a3df9f S |
316 | n = HCLGE_RTN_DATA_NUM; |
317 | } | |
318 | for (k = 0; k < n; k++) { | |
a90bb9a5 | 319 | *data++ += le64_to_cpu(*desc_data); |
46a3df9f S |
320 | desc_data++; |
321 | } | |
322 | } | |
323 | ||
324 | return 0; | |
325 | } | |
326 | ||
327 | static int hclge_tqps_update_stats(struct hnae3_handle *handle) | |
328 | { | |
329 | struct hnae3_knic_private_info *kinfo = &handle->kinfo; | |
330 | struct hclge_vport *vport = hclge_get_vport(handle); | |
331 | struct hclge_dev *hdev = vport->back; | |
332 | struct hnae3_queue *queue; | |
333 | struct hclge_desc desc[1]; | |
334 | struct hclge_tqp *tqp; | |
335 | int ret, i; | |
336 | ||
337 | for (i = 0; i < kinfo->num_tqps; i++) { | |
338 | queue = handle->kinfo.tqp[i]; | |
339 | tqp = container_of(queue, struct hclge_tqp, q); | |
340 | /* command : HCLGE_OPC_QUERY_IGU_STAT */ | |
341 | hclge_cmd_setup_basic_desc(&desc[0], | |
342 | HCLGE_OPC_QUERY_RX_STATUS, | |
343 | true); | |
344 | ||
a90bb9a5 | 345 | desc[0].data[0] = cpu_to_le32((tqp->index & 0x1ff)); |
46a3df9f S |
346 | ret = hclge_cmd_send(&hdev->hw, desc, 1); |
347 | if (ret) { | |
348 | dev_err(&hdev->pdev->dev, | |
349 | "Query tqp stat fail, status = %d,queue = %d\n", | |
350 | ret, i); | |
351 | return ret; | |
352 | } | |
353 | tqp->tqp_stats.rcb_rx_ring_pktnum_rcd += | |
93991b65 | 354 | le32_to_cpu(desc[0].data[1]); |
46a3df9f S |
355 | } |
356 | ||
357 | for (i = 0; i < kinfo->num_tqps; i++) { | |
358 | queue = handle->kinfo.tqp[i]; | |
359 | tqp = container_of(queue, struct hclge_tqp, q); | |
360 | /* command : HCLGE_OPC_QUERY_IGU_STAT */ | |
361 | hclge_cmd_setup_basic_desc(&desc[0], | |
362 | HCLGE_OPC_QUERY_TX_STATUS, | |
363 | true); | |
364 | ||
a90bb9a5 | 365 | desc[0].data[0] = cpu_to_le32((tqp->index & 0x1ff)); |
46a3df9f S |
366 | ret = hclge_cmd_send(&hdev->hw, desc, 1); |
367 | if (ret) { | |
368 | dev_err(&hdev->pdev->dev, | |
369 | "Query tqp stat fail, status = %d,queue = %d\n", | |
370 | ret, i); | |
371 | return ret; | |
372 | } | |
373 | tqp->tqp_stats.rcb_tx_ring_pktnum_rcd += | |
93991b65 | 374 | le32_to_cpu(desc[0].data[1]); |
46a3df9f S |
375 | } |
376 | ||
377 | return 0; | |
378 | } | |
379 | ||
380 | static u64 *hclge_tqps_get_stats(struct hnae3_handle *handle, u64 *data) | |
381 | { | |
382 | struct hnae3_knic_private_info *kinfo = &handle->kinfo; | |
383 | struct hclge_tqp *tqp; | |
384 | u64 *buff = data; | |
385 | int i; | |
386 | ||
387 | for (i = 0; i < kinfo->num_tqps; i++) { | |
388 | tqp = container_of(kinfo->tqp[i], struct hclge_tqp, q); | |
a90bb9a5 | 389 | *buff++ = tqp->tqp_stats.rcb_tx_ring_pktnum_rcd; |
46a3df9f S |
390 | } |
391 | ||
392 | for (i = 0; i < kinfo->num_tqps; i++) { | |
393 | tqp = container_of(kinfo->tqp[i], struct hclge_tqp, q); | |
a90bb9a5 | 394 | *buff++ = tqp->tqp_stats.rcb_rx_ring_pktnum_rcd; |
46a3df9f S |
395 | } |
396 | ||
397 | return buff; | |
398 | } | |
399 | ||
400 | static int hclge_tqps_get_sset_count(struct hnae3_handle *handle, int stringset) | |
401 | { | |
402 | struct hnae3_knic_private_info *kinfo = &handle->kinfo; | |
403 | ||
404 | return kinfo->num_tqps * (2); | |
405 | } | |
406 | ||
407 | static u8 *hclge_tqps_get_strings(struct hnae3_handle *handle, u8 *data) | |
408 | { | |
409 | struct hnae3_knic_private_info *kinfo = &handle->kinfo; | |
410 | u8 *buff = data; | |
411 | int i = 0; | |
412 | ||
413 | for (i = 0; i < kinfo->num_tqps; i++) { | |
414 | struct hclge_tqp *tqp = container_of(handle->kinfo.tqp[i], | |
415 | struct hclge_tqp, q); | |
eedff8c0 | 416 | snprintf(buff, ETH_GSTRING_LEN, "txq%d_pktnum_rcd", |
46a3df9f S |
417 | tqp->index); |
418 | buff = buff + ETH_GSTRING_LEN; | |
419 | } | |
420 | ||
421 | for (i = 0; i < kinfo->num_tqps; i++) { | |
422 | struct hclge_tqp *tqp = container_of(kinfo->tqp[i], | |
423 | struct hclge_tqp, q); | |
eedff8c0 | 424 | snprintf(buff, ETH_GSTRING_LEN, "rxq%d_pktnum_rcd", |
46a3df9f S |
425 | tqp->index); |
426 | buff = buff + ETH_GSTRING_LEN; | |
427 | } | |
428 | ||
429 | return buff; | |
430 | } | |
431 | ||
432 | static u64 *hclge_comm_get_stats(void *comm_stats, | |
433 | const struct hclge_comm_stats_str strs[], | |
434 | int size, u64 *data) | |
435 | { | |
436 | u64 *buf = data; | |
437 | u32 i; | |
438 | ||
439 | for (i = 0; i < size; i++) | |
440 | buf[i] = HCLGE_STATS_READ(comm_stats, strs[i].offset); | |
441 | ||
442 | return buf + size; | |
443 | } | |
444 | ||
445 | static u8 *hclge_comm_get_strings(u32 stringset, | |
446 | const struct hclge_comm_stats_str strs[], | |
447 | int size, u8 *data) | |
448 | { | |
449 | char *buff = (char *)data; | |
450 | u32 i; | |
451 | ||
452 | if (stringset != ETH_SS_STATS) | |
453 | return buff; | |
454 | ||
455 | for (i = 0; i < size; i++) { | |
456 | snprintf(buff, ETH_GSTRING_LEN, | |
457 | strs[i].desc); | |
458 | buff = buff + ETH_GSTRING_LEN; | |
459 | } | |
460 | ||
461 | return (u8 *)buff; | |
462 | } | |
463 | ||
464 | static void hclge_update_netstat(struct hclge_hw_stats *hw_stats, | |
465 | struct net_device_stats *net_stats) | |
466 | { | |
467 | net_stats->tx_dropped = 0; | |
f3426583 | 468 | net_stats->rx_errors = hw_stats->mac_stats.mac_rx_oversize_pkt_num; |
46a3df9f | 469 | net_stats->rx_errors += hw_stats->mac_stats.mac_rx_undersize_pkt_num; |
c36317be | 470 | net_stats->rx_errors += hw_stats->mac_stats.mac_rx_fcs_err_pkt_num; |
46a3df9f S |
471 | |
472 | net_stats->multicast = hw_stats->mac_stats.mac_tx_multi_pkt_num; | |
473 | net_stats->multicast += hw_stats->mac_stats.mac_rx_multi_pkt_num; | |
474 | ||
c36317be | 475 | net_stats->rx_crc_errors = hw_stats->mac_stats.mac_rx_fcs_err_pkt_num; |
46a3df9f S |
476 | net_stats->rx_length_errors = |
477 | hw_stats->mac_stats.mac_rx_undersize_pkt_num; | |
478 | net_stats->rx_length_errors += | |
f3426583 | 479 | hw_stats->mac_stats.mac_rx_oversize_pkt_num; |
46a3df9f | 480 | net_stats->rx_over_errors = |
f3426583 | 481 | hw_stats->mac_stats.mac_rx_oversize_pkt_num; |
46a3df9f S |
482 | } |
483 | ||
484 | static void hclge_update_stats_for_all(struct hclge_dev *hdev) | |
485 | { | |
486 | struct hnae3_handle *handle; | |
487 | int status; | |
488 | ||
489 | handle = &hdev->vport[0].nic; | |
490 | if (handle->client) { | |
491 | status = hclge_tqps_update_stats(handle); | |
492 | if (status) { | |
493 | dev_err(&hdev->pdev->dev, | |
494 | "Update TQPS stats fail, status = %d.\n", | |
495 | status); | |
496 | } | |
497 | } | |
498 | ||
499 | status = hclge_mac_update_stats(hdev); | |
500 | if (status) | |
501 | dev_err(&hdev->pdev->dev, | |
502 | "Update MAC stats fail, status = %d.\n", status); | |
503 | ||
46a3df9f S |
504 | hclge_update_netstat(&hdev->hw_stats, &handle->kinfo.netdev->stats); |
505 | } | |
506 | ||
507 | static void hclge_update_stats(struct hnae3_handle *handle, | |
508 | struct net_device_stats *net_stats) | |
509 | { | |
510 | struct hclge_vport *vport = hclge_get_vport(handle); | |
511 | struct hclge_dev *hdev = vport->back; | |
512 | struct hclge_hw_stats *hw_stats = &hdev->hw_stats; | |
513 | int status; | |
514 | ||
7a5d2a39 JS |
515 | if (test_and_set_bit(HCLGE_STATE_STATISTICS_UPDATING, &hdev->state)) |
516 | return; | |
517 | ||
46a3df9f S |
518 | status = hclge_mac_update_stats(hdev); |
519 | if (status) | |
520 | dev_err(&hdev->pdev->dev, | |
521 | "Update MAC stats fail, status = %d.\n", | |
522 | status); | |
523 | ||
46a3df9f S |
524 | status = hclge_tqps_update_stats(handle); |
525 | if (status) | |
526 | dev_err(&hdev->pdev->dev, | |
527 | "Update TQPS stats fail, status = %d.\n", | |
528 | status); | |
529 | ||
530 | hclge_update_netstat(hw_stats, net_stats); | |
7a5d2a39 JS |
531 | |
532 | clear_bit(HCLGE_STATE_STATISTICS_UPDATING, &hdev->state); | |
46a3df9f S |
533 | } |
534 | ||
535 | static int hclge_get_sset_count(struct hnae3_handle *handle, int stringset) | |
536 | { | |
86957272 FL |
537 | #define HCLGE_LOOPBACK_TEST_FLAGS (HNAE3_SUPPORT_APP_LOOPBACK |\ |
538 | HNAE3_SUPPORT_PHY_LOOPBACK |\ | |
539 | HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK |\ | |
540 | HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK) | |
46a3df9f S |
541 | |
542 | struct hclge_vport *vport = hclge_get_vport(handle); | |
543 | struct hclge_dev *hdev = vport->back; | |
544 | int count = 0; | |
545 | ||
546 | /* Loopback test support rules: | |
547 | * mac: only GE mode support | |
548 | * serdes: all mac mode will support include GE/XGE/LGE/CGE | |
549 | * phy: only support when phy device exist on board | |
550 | */ | |
551 | if (stringset == ETH_SS_TEST) { | |
552 | /* clear loopback bit flags at first */ | |
553 | handle->flags = (handle->flags & (~HCLGE_LOOPBACK_TEST_FLAGS)); | |
735f1df8 | 554 | if (hdev->pdev->revision >= 0x21 || |
86957272 | 555 | hdev->hw.mac.speed == HCLGE_MAC_SPEED_10M || |
46a3df9f S |
556 | hdev->hw.mac.speed == HCLGE_MAC_SPEED_100M || |
557 | hdev->hw.mac.speed == HCLGE_MAC_SPEED_1G) { | |
558 | count += 1; | |
67b8c316 | 559 | handle->flags |= HNAE3_SUPPORT_APP_LOOPBACK; |
46a3df9f | 560 | } |
e006bb00 | 561 | |
86957272 FL |
562 | count += 2; |
563 | handle->flags |= HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK; | |
564 | handle->flags |= HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK; | |
46a3df9f S |
565 | } else if (stringset == ETH_SS_STATS) { |
566 | count = ARRAY_SIZE(g_mac_stats_string) + | |
46a3df9f S |
567 | hclge_tqps_get_sset_count(handle, stringset); |
568 | } | |
569 | ||
570 | return count; | |
571 | } | |
572 | ||
573 | static void hclge_get_strings(struct hnae3_handle *handle, | |
574 | u32 stringset, | |
575 | u8 *data) | |
576 | { | |
577 | u8 *p = (char *)data; | |
578 | int size; | |
579 | ||
580 | if (stringset == ETH_SS_STATS) { | |
581 | size = ARRAY_SIZE(g_mac_stats_string); | |
582 | p = hclge_comm_get_strings(stringset, | |
583 | g_mac_stats_string, | |
584 | size, | |
585 | p); | |
46a3df9f S |
586 | p = hclge_tqps_get_strings(handle, p); |
587 | } else if (stringset == ETH_SS_TEST) { | |
67b8c316 | 588 | if (handle->flags & HNAE3_SUPPORT_APP_LOOPBACK) { |
46a3df9f | 589 | memcpy(p, |
67b8c316 | 590 | hns3_nic_test_strs[HNAE3_LOOP_APP], |
46a3df9f S |
591 | ETH_GSTRING_LEN); |
592 | p += ETH_GSTRING_LEN; | |
593 | } | |
86957272 | 594 | if (handle->flags & HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK) { |
46a3df9f | 595 | memcpy(p, |
86957272 FL |
596 | hns3_nic_test_strs[HNAE3_LOOP_SERIAL_SERDES], |
597 | ETH_GSTRING_LEN); | |
598 | p += ETH_GSTRING_LEN; | |
599 | } | |
600 | if (handle->flags & HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK) { | |
601 | memcpy(p, | |
602 | hns3_nic_test_strs[HNAE3_LOOP_PARALLEL_SERDES], | |
46a3df9f S |
603 | ETH_GSTRING_LEN); |
604 | p += ETH_GSTRING_LEN; | |
605 | } | |
606 | if (handle->flags & HNAE3_SUPPORT_PHY_LOOPBACK) { | |
607 | memcpy(p, | |
e05cfaaf | 608 | hns3_nic_test_strs[HNAE3_LOOP_PHY], |
46a3df9f S |
609 | ETH_GSTRING_LEN); |
610 | p += ETH_GSTRING_LEN; | |
611 | } | |
612 | } | |
613 | } | |
614 | ||
615 | static void hclge_get_stats(struct hnae3_handle *handle, u64 *data) | |
616 | { | |
617 | struct hclge_vport *vport = hclge_get_vport(handle); | |
618 | struct hclge_dev *hdev = vport->back; | |
619 | u64 *p; | |
620 | ||
621 | p = hclge_comm_get_stats(&hdev->hw_stats.mac_stats, | |
622 | g_mac_stats_string, | |
623 | ARRAY_SIZE(g_mac_stats_string), | |
624 | data); | |
46a3df9f S |
625 | p = hclge_tqps_get_stats(handle, p); |
626 | } | |
627 | ||
628 | static int hclge_parse_func_status(struct hclge_dev *hdev, | |
d44f9b63 | 629 | struct hclge_func_status_cmd *status) |
46a3df9f S |
630 | { |
631 | if (!(status->pf_state & HCLGE_PF_STATE_DONE)) | |
632 | return -EINVAL; | |
633 | ||
634 | /* Set the pf to main pf */ | |
635 | if (status->pf_state & HCLGE_PF_STATE_MAIN) | |
636 | hdev->flag |= HCLGE_FLAG_MAIN; | |
637 | else | |
638 | hdev->flag &= ~HCLGE_FLAG_MAIN; | |
639 | ||
46a3df9f S |
640 | return 0; |
641 | } | |
642 | ||
643 | static int hclge_query_function_status(struct hclge_dev *hdev) | |
644 | { | |
d44f9b63 | 645 | struct hclge_func_status_cmd *req; |
46a3df9f S |
646 | struct hclge_desc desc; |
647 | int timeout = 0; | |
648 | int ret; | |
649 | ||
650 | hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_FUNC_STATUS, true); | |
d44f9b63 | 651 | req = (struct hclge_func_status_cmd *)desc.data; |
46a3df9f S |
652 | |
653 | do { | |
654 | ret = hclge_cmd_send(&hdev->hw, &desc, 1); | |
655 | if (ret) { | |
656 | dev_err(&hdev->pdev->dev, | |
657 | "query function status failed %d.\n", | |
658 | ret); | |
659 | ||
660 | return ret; | |
661 | } | |
662 | ||
663 | /* Check pf reset is done */ | |
664 | if (req->pf_state) | |
665 | break; | |
666 | usleep_range(1000, 2000); | |
667 | } while (timeout++ < 5); | |
668 | ||
669 | ret = hclge_parse_func_status(hdev, req); | |
670 | ||
671 | return ret; | |
672 | } | |
673 | ||
674 | static int hclge_query_pf_resource(struct hclge_dev *hdev) | |
675 | { | |
d44f9b63 | 676 | struct hclge_pf_res_cmd *req; |
46a3df9f S |
677 | struct hclge_desc desc; |
678 | int ret; | |
679 | ||
680 | hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_PF_RSRC, true); | |
681 | ret = hclge_cmd_send(&hdev->hw, &desc, 1); | |
682 | if (ret) { | |
683 | dev_err(&hdev->pdev->dev, | |
684 | "query pf resource failed %d.\n", ret); | |
685 | return ret; | |
686 | } | |
687 | ||
d44f9b63 | 688 | req = (struct hclge_pf_res_cmd *)desc.data; |
46a3df9f S |
689 | hdev->num_tqps = __le16_to_cpu(req->tqp_num); |
690 | hdev->pkt_buf_size = __le16_to_cpu(req->buf_size) << HCLGE_BUF_UNIT_S; | |
691 | ||
cb799ea5 YL |
692 | if (req->tx_buf_size) |
693 | hdev->tx_buf_size = | |
694 | __le16_to_cpu(req->tx_buf_size) << HCLGE_BUF_UNIT_S; | |
695 | else | |
696 | hdev->tx_buf_size = HCLGE_DEFAULT_TX_BUF; | |
697 | ||
89b4e1bb YL |
698 | hdev->tx_buf_size = roundup(hdev->tx_buf_size, HCLGE_BUF_SIZE_UNIT); |
699 | ||
cb799ea5 YL |
700 | if (req->dv_buf_size) |
701 | hdev->dv_buf_size = | |
702 | __le16_to_cpu(req->dv_buf_size) << HCLGE_BUF_UNIT_S; | |
703 | else | |
704 | hdev->dv_buf_size = HCLGE_DEFAULT_DV; | |
705 | ||
89b4e1bb YL |
706 | hdev->dv_buf_size = roundup(hdev->dv_buf_size, HCLGE_BUF_SIZE_UNIT); |
707 | ||
e92a0843 | 708 | if (hnae3_dev_roce_supported(hdev)) { |
5355e6d3 JS |
709 | hdev->roce_base_msix_offset = |
710 | hnae3_get_field(__le16_to_cpu(req->msixcap_localid_ba_rocee), | |
711 | HCLGE_MSIX_OFT_ROCEE_M, HCLGE_MSIX_OFT_ROCEE_S); | |
887c3820 | 712 | hdev->num_roce_msi = |
ccc23ef3 PL |
713 | hnae3_get_field(__le16_to_cpu(req->pf_intr_vector_number), |
714 | HCLGE_PF_VEC_NUM_M, HCLGE_PF_VEC_NUM_S); | |
46a3df9f S |
715 | |
716 | /* PF should have NIC vectors and Roce vectors, | |
717 | * NIC vectors are queued before Roce vectors. | |
718 | */ | |
5355e6d3 JS |
719 | hdev->num_msi = hdev->num_roce_msi + |
720 | hdev->roce_base_msix_offset; | |
46a3df9f S |
721 | } else { |
722 | hdev->num_msi = | |
ccc23ef3 PL |
723 | hnae3_get_field(__le16_to_cpu(req->pf_intr_vector_number), |
724 | HCLGE_PF_VEC_NUM_M, HCLGE_PF_VEC_NUM_S); | |
46a3df9f S |
725 | } |
726 | ||
727 | return 0; | |
728 | } | |
729 | ||
730 | static int hclge_parse_speed(int speed_cmd, int *speed) | |
731 | { | |
732 | switch (speed_cmd) { | |
733 | case 6: | |
734 | *speed = HCLGE_MAC_SPEED_10M; | |
735 | break; | |
736 | case 7: | |
737 | *speed = HCLGE_MAC_SPEED_100M; | |
738 | break; | |
739 | case 0: | |
740 | *speed = HCLGE_MAC_SPEED_1G; | |
741 | break; | |
742 | case 1: | |
743 | *speed = HCLGE_MAC_SPEED_10G; | |
744 | break; | |
745 | case 2: | |
746 | *speed = HCLGE_MAC_SPEED_25G; | |
747 | break; | |
748 | case 3: | |
749 | *speed = HCLGE_MAC_SPEED_40G; | |
750 | break; | |
751 | case 4: | |
752 | *speed = HCLGE_MAC_SPEED_50G; | |
753 | break; | |
754 | case 5: | |
755 | *speed = HCLGE_MAC_SPEED_100G; | |
756 | break; | |
757 | default: | |
758 | return -EINVAL; | |
759 | } | |
760 | ||
761 | return 0; | |
762 | } | |
763 | ||
d92ceae9 FL |
764 | static void hclge_parse_fiber_link_mode(struct hclge_dev *hdev, |
765 | u8 speed_ability) | |
766 | { | |
767 | unsigned long *supported = hdev->hw.mac.supported; | |
768 | ||
769 | if (speed_ability & HCLGE_SUPPORT_1G_BIT) | |
770 | set_bit(ETHTOOL_LINK_MODE_1000baseX_Full_BIT, | |
771 | supported); | |
772 | ||
773 | if (speed_ability & HCLGE_SUPPORT_10G_BIT) | |
774 | set_bit(ETHTOOL_LINK_MODE_10000baseSR_Full_BIT, | |
775 | supported); | |
776 | ||
777 | if (speed_ability & HCLGE_SUPPORT_25G_BIT) | |
778 | set_bit(ETHTOOL_LINK_MODE_25000baseSR_Full_BIT, | |
779 | supported); | |
780 | ||
781 | if (speed_ability & HCLGE_SUPPORT_50G_BIT) | |
782 | set_bit(ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT, | |
783 | supported); | |
784 | ||
785 | if (speed_ability & HCLGE_SUPPORT_100G_BIT) | |
786 | set_bit(ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT, | |
787 | supported); | |
788 | ||
789 | set_bit(ETHTOOL_LINK_MODE_FIBRE_BIT, supported); | |
790 | set_bit(ETHTOOL_LINK_MODE_Pause_BIT, supported); | |
791 | } | |
792 | ||
793 | static void hclge_parse_link_mode(struct hclge_dev *hdev, u8 speed_ability) | |
794 | { | |
795 | u8 media_type = hdev->hw.mac.media_type; | |
796 | ||
797 | if (media_type != HNAE3_MEDIA_TYPE_FIBER) | |
798 | return; | |
799 | ||
800 | hclge_parse_fiber_link_mode(hdev, speed_ability); | |
801 | } | |
802 | ||
46a3df9f S |
803 | static void hclge_parse_cfg(struct hclge_cfg *cfg, struct hclge_desc *desc) |
804 | { | |
d44f9b63 | 805 | struct hclge_cfg_param_cmd *req; |
46a3df9f S |
806 | u64 mac_addr_tmp_high; |
807 | u64 mac_addr_tmp; | |
808 | int i; | |
809 | ||
d44f9b63 | 810 | req = (struct hclge_cfg_param_cmd *)desc[0].data; |
46a3df9f S |
811 | |
812 | /* get the configuration */ | |
ccc23ef3 PL |
813 | cfg->vmdq_vport_num = hnae3_get_field(__le32_to_cpu(req->param[0]), |
814 | HCLGE_CFG_VMDQ_M, | |
815 | HCLGE_CFG_VMDQ_S); | |
816 | cfg->tc_num = hnae3_get_field(__le32_to_cpu(req->param[0]), | |
817 | HCLGE_CFG_TC_NUM_M, HCLGE_CFG_TC_NUM_S); | |
818 | cfg->tqp_desc_num = hnae3_get_field(__le32_to_cpu(req->param[0]), | |
819 | HCLGE_CFG_TQP_DESC_N_M, | |
820 | HCLGE_CFG_TQP_DESC_N_S); | |
821 | ||
822 | cfg->phy_addr = hnae3_get_field(__le32_to_cpu(req->param[1]), | |
823 | HCLGE_CFG_PHY_ADDR_M, | |
824 | HCLGE_CFG_PHY_ADDR_S); | |
825 | cfg->media_type = hnae3_get_field(__le32_to_cpu(req->param[1]), | |
826 | HCLGE_CFG_MEDIA_TP_M, | |
827 | HCLGE_CFG_MEDIA_TP_S); | |
828 | cfg->rx_buf_len = hnae3_get_field(__le32_to_cpu(req->param[1]), | |
829 | HCLGE_CFG_RX_BUF_LEN_M, | |
830 | HCLGE_CFG_RX_BUF_LEN_S); | |
46a3df9f S |
831 | /* get mac_address */ |
832 | mac_addr_tmp = __le32_to_cpu(req->param[2]); | |
ccc23ef3 PL |
833 | mac_addr_tmp_high = hnae3_get_field(__le32_to_cpu(req->param[3]), |
834 | HCLGE_CFG_MAC_ADDR_H_M, | |
835 | HCLGE_CFG_MAC_ADDR_H_S); | |
46a3df9f S |
836 | |
837 | mac_addr_tmp |= (mac_addr_tmp_high << 31) << 1; | |
838 | ||
ccc23ef3 PL |
839 | cfg->default_speed = hnae3_get_field(__le32_to_cpu(req->param[3]), |
840 | HCLGE_CFG_DEFAULT_SPEED_M, | |
841 | HCLGE_CFG_DEFAULT_SPEED_S); | |
842 | cfg->rss_size_max = hnae3_get_field(__le32_to_cpu(req->param[3]), | |
843 | HCLGE_CFG_RSS_SIZE_M, | |
844 | HCLGE_CFG_RSS_SIZE_S); | |
c408e202 | 845 | |
46a3df9f S |
846 | for (i = 0; i < ETH_ALEN; i++) |
847 | cfg->mac_addr[i] = (mac_addr_tmp >> (8 * i)) & 0xff; | |
848 | ||
d44f9b63 | 849 | req = (struct hclge_cfg_param_cmd *)desc[1].data; |
46a3df9f | 850 | cfg->numa_node_map = __le32_to_cpu(req->param[0]); |
d92ceae9 | 851 | |
ccc23ef3 PL |
852 | cfg->speed_ability = hnae3_get_field(__le32_to_cpu(req->param[1]), |
853 | HCLGE_CFG_SPEED_ABILITY_M, | |
854 | HCLGE_CFG_SPEED_ABILITY_S); | |
2da5ec58 JS |
855 | cfg->umv_space = hnae3_get_field(__le32_to_cpu(req->param[1]), |
856 | HCLGE_CFG_UMV_TBL_SPACE_M, | |
857 | HCLGE_CFG_UMV_TBL_SPACE_S); | |
858 | if (!cfg->umv_space) | |
859 | cfg->umv_space = HCLGE_DEFAULT_UMV_SPACE_PER_PF; | |
46a3df9f S |
860 | } |
861 | ||
862 | /* hclge_get_cfg: query the static parameter from flash | |
863 | * @hdev: pointer to struct hclge_dev | |
864 | * @hcfg: the config structure to be getted | |
865 | */ | |
866 | static int hclge_get_cfg(struct hclge_dev *hdev, struct hclge_cfg *hcfg) | |
867 | { | |
868 | struct hclge_desc desc[HCLGE_PF_CFG_DESC_NUM]; | |
d44f9b63 | 869 | struct hclge_cfg_param_cmd *req; |
46a3df9f S |
870 | int i, ret; |
871 | ||
872 | for (i = 0; i < HCLGE_PF_CFG_DESC_NUM; i++) { | |
a90bb9a5 YL |
873 | u32 offset = 0; |
874 | ||
d44f9b63 | 875 | req = (struct hclge_cfg_param_cmd *)desc[i].data; |
46a3df9f S |
876 | hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_GET_CFG_PARAM, |
877 | true); | |
ccc23ef3 PL |
878 | hnae3_set_field(offset, HCLGE_CFG_OFFSET_M, |
879 | HCLGE_CFG_OFFSET_S, i * HCLGE_CFG_RD_LEN_BYTES); | |
46a3df9f | 880 | /* Len should be united by 4 bytes when send to hardware */ |
ccc23ef3 PL |
881 | hnae3_set_field(offset, HCLGE_CFG_RD_LEN_M, HCLGE_CFG_RD_LEN_S, |
882 | HCLGE_CFG_RD_LEN_BYTES / HCLGE_CFG_RD_LEN_UNIT); | |
a90bb9a5 | 883 | req->offset = cpu_to_le32(offset); |
46a3df9f S |
884 | } |
885 | ||
886 | ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_PF_CFG_DESC_NUM); | |
887 | if (ret) { | |
90415e85 | 888 | dev_err(&hdev->pdev->dev, "get config failed %d.\n", ret); |
46a3df9f S |
889 | return ret; |
890 | } | |
891 | ||
892 | hclge_parse_cfg(hcfg, desc); | |
90415e85 | 893 | |
46a3df9f S |
894 | return 0; |
895 | } | |
896 | ||
897 | static int hclge_get_cap(struct hclge_dev *hdev) | |
898 | { | |
899 | int ret; | |
900 | ||
901 | ret = hclge_query_function_status(hdev); | |
902 | if (ret) { | |
903 | dev_err(&hdev->pdev->dev, | |
904 | "query function status error %d.\n", ret); | |
905 | return ret; | |
906 | } | |
907 | ||
908 | /* get pf resource */ | |
909 | ret = hclge_query_pf_resource(hdev); | |
90415e85 JS |
910 | if (ret) |
911 | dev_err(&hdev->pdev->dev, "query pf resource error %d.\n", ret); | |
46a3df9f | 912 | |
90415e85 | 913 | return ret; |
46a3df9f S |
914 | } |
915 | ||
916 | static int hclge_configure(struct hclge_dev *hdev) | |
917 | { | |
918 | struct hclge_cfg cfg; | |
919 | int ret, i; | |
920 | ||
921 | ret = hclge_get_cfg(hdev, &cfg); | |
922 | if (ret) { | |
923 | dev_err(&hdev->pdev->dev, "get mac mode error %d.\n", ret); | |
924 | return ret; | |
925 | } | |
926 | ||
927 | hdev->num_vmdq_vport = cfg.vmdq_vport_num; | |
928 | hdev->base_tqp_pid = 0; | |
c408e202 | 929 | hdev->rss_size_max = cfg.rss_size_max; |
46a3df9f | 930 | hdev->rx_buf_len = cfg.rx_buf_len; |
fbbb1536 | 931 | ether_addr_copy(hdev->hw.mac.mac_addr, cfg.mac_addr); |
46a3df9f | 932 | hdev->hw.mac.media_type = cfg.media_type; |
2a4776e1 | 933 | hdev->hw.mac.phy_addr = cfg.phy_addr; |
46a3df9f S |
934 | hdev->num_desc = cfg.tqp_desc_num; |
935 | hdev->tm_info.num_pg = 1; | |
cacde272 | 936 | hdev->tc_max = cfg.tc_num; |
46a3df9f | 937 | hdev->tm_info.hw_pfc_map = 0; |
2da5ec58 | 938 | hdev->wanted_umv_size = cfg.umv_space; |
46a3df9f S |
939 | |
940 | ret = hclge_parse_speed(cfg.default_speed, &hdev->hw.mac.speed); | |
941 | if (ret) { | |
942 | dev_err(&hdev->pdev->dev, "Get wrong speed ret=%d.\n", ret); | |
943 | return ret; | |
944 | } | |
945 | ||
d92ceae9 FL |
946 | hclge_parse_link_mode(hdev, cfg.speed_ability); |
947 | ||
cacde272 YL |
948 | if ((hdev->tc_max > HNAE3_MAX_TC) || |
949 | (hdev->tc_max < 1)) { | |
46a3df9f | 950 | dev_warn(&hdev->pdev->dev, "TC num = %d.\n", |
cacde272 YL |
951 | hdev->tc_max); |
952 | hdev->tc_max = 1; | |
46a3df9f S |
953 | } |
954 | ||
cacde272 YL |
955 | /* Dev does not support DCB */ |
956 | if (!hnae3_dev_dcb_supported(hdev)) { | |
957 | hdev->tc_max = 1; | |
958 | hdev->pfc_max = 0; | |
959 | } else { | |
960 | hdev->pfc_max = hdev->tc_max; | |
961 | } | |
962 | ||
dd0a65e8 | 963 | hdev->tm_info.num_tc = 1; |
cacde272 | 964 | |
46a3df9f | 965 | /* Currently not support uncontiuous tc */ |
cacde272 | 966 | for (i = 0; i < hdev->tm_info.num_tc; i++) |
ccc23ef3 | 967 | hnae3_set_bit(hdev->hw_tc_map, i, 1); |
46a3df9f | 968 | |
f8362fe1 | 969 | hdev->tx_sch_mode = HCLGE_FLAG_TC_BASE_SCH_MODE; |
46a3df9f S |
970 | |
971 | return ret; | |
972 | } | |
973 | ||
974 | static int hclge_config_tso(struct hclge_dev *hdev, int tso_mss_min, | |
975 | int tso_mss_max) | |
976 | { | |
d44f9b63 | 977 | struct hclge_cfg_tso_status_cmd *req; |
46a3df9f | 978 | struct hclge_desc desc; |
a90bb9a5 | 979 | u16 tso_mss; |
46a3df9f S |
980 | |
981 | hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TSO_GENERIC_CONFIG, false); | |
982 | ||
d44f9b63 | 983 | req = (struct hclge_cfg_tso_status_cmd *)desc.data; |
a90bb9a5 YL |
984 | |
985 | tso_mss = 0; | |
ccc23ef3 PL |
986 | hnae3_set_field(tso_mss, HCLGE_TSO_MSS_MIN_M, |
987 | HCLGE_TSO_MSS_MIN_S, tso_mss_min); | |
a90bb9a5 YL |
988 | req->tso_mss_min = cpu_to_le16(tso_mss); |
989 | ||
990 | tso_mss = 0; | |
ccc23ef3 PL |
991 | hnae3_set_field(tso_mss, HCLGE_TSO_MSS_MIN_M, |
992 | HCLGE_TSO_MSS_MIN_S, tso_mss_max); | |
a90bb9a5 | 993 | req->tso_mss_max = cpu_to_le16(tso_mss); |
46a3df9f S |
994 | |
995 | return hclge_cmd_send(&hdev->hw, &desc, 1); | |
996 | } | |
997 | ||
73f88b00 PL |
998 | static int hclge_config_gro(struct hclge_dev *hdev, bool en) |
999 | { | |
1000 | struct hclge_cfg_gro_status_cmd *req; | |
1001 | struct hclge_desc desc; | |
1002 | int ret; | |
1003 | ||
1004 | if (!hnae3_dev_gro_supported(hdev)) | |
1005 | return 0; | |
1006 | ||
1007 | hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GRO_GENERIC_CONFIG, false); | |
1008 | req = (struct hclge_cfg_gro_status_cmd *)desc.data; | |
1009 | ||
1010 | req->gro_en = cpu_to_le16(en ? 1 : 0); | |
1011 | ||
1012 | ret = hclge_cmd_send(&hdev->hw, &desc, 1); | |
1013 | if (ret) | |
1014 | dev_err(&hdev->pdev->dev, | |
1015 | "GRO hardware config cmd failed, ret = %d\n", ret); | |
1016 | ||
1017 | return ret; | |
1018 | } | |
1019 | ||
46a3df9f S |
1020 | static int hclge_alloc_tqps(struct hclge_dev *hdev) |
1021 | { | |
1022 | struct hclge_tqp *tqp; | |
1023 | int i; | |
1024 | ||
1025 | hdev->htqp = devm_kcalloc(&hdev->pdev->dev, hdev->num_tqps, | |
1026 | sizeof(struct hclge_tqp), GFP_KERNEL); | |
1027 | if (!hdev->htqp) | |
1028 | return -ENOMEM; | |
1029 | ||
1030 | tqp = hdev->htqp; | |
1031 | ||
1032 | for (i = 0; i < hdev->num_tqps; i++) { | |
1033 | tqp->dev = &hdev->pdev->dev; | |
1034 | tqp->index = i; | |
1035 | ||
1036 | tqp->q.ae_algo = &ae_algo; | |
1037 | tqp->q.buf_size = hdev->rx_buf_len; | |
1038 | tqp->q.desc_num = hdev->num_desc; | |
1039 | tqp->q.io_base = hdev->hw.io_base + HCLGE_TQP_REG_OFFSET + | |
1040 | i * HCLGE_TQP_REG_SIZE; | |
1041 | ||
1042 | tqp++; | |
1043 | } | |
1044 | ||
1045 | return 0; | |
1046 | } | |
1047 | ||
1048 | static int hclge_map_tqps_to_func(struct hclge_dev *hdev, u16 func_id, | |
1049 | u16 tqp_pid, u16 tqp_vid, bool is_pf) | |
1050 | { | |
d44f9b63 | 1051 | struct hclge_tqp_map_cmd *req; |
46a3df9f S |
1052 | struct hclge_desc desc; |
1053 | int ret; | |
1054 | ||
1055 | hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SET_TQP_MAP, false); | |
1056 | ||
d44f9b63 | 1057 | req = (struct hclge_tqp_map_cmd *)desc.data; |
46a3df9f | 1058 | req->tqp_id = cpu_to_le16(tqp_pid); |
a90bb9a5 | 1059 | req->tqp_vf = func_id; |
46a3df9f S |
1060 | req->tqp_flag = !is_pf << HCLGE_TQP_MAP_TYPE_B | |
1061 | 1 << HCLGE_TQP_MAP_EN_B; | |
1062 | req->tqp_vid = cpu_to_le16(tqp_vid); | |
1063 | ||
1064 | ret = hclge_cmd_send(&hdev->hw, &desc, 1); | |
90415e85 JS |
1065 | if (ret) |
1066 | dev_err(&hdev->pdev->dev, "TQP map failed %d.\n", ret); | |
46a3df9f | 1067 | |
90415e85 | 1068 | return ret; |
46a3df9f S |
1069 | } |
1070 | ||
81356b1f | 1071 | static int hclge_assign_tqp(struct hclge_vport *vport) |
46a3df9f | 1072 | { |
81356b1f | 1073 | struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo; |
46a3df9f | 1074 | struct hclge_dev *hdev = vport->back; |
7df7dad6 | 1075 | int i, alloced; |
46a3df9f S |
1076 | |
1077 | for (i = 0, alloced = 0; i < hdev->num_tqps && | |
81356b1f | 1078 | alloced < kinfo->num_tqps; i++) { |
46a3df9f S |
1079 | if (!hdev->htqp[i].alloced) { |
1080 | hdev->htqp[i].q.handle = &vport->nic; | |
1081 | hdev->htqp[i].q.tqp_index = alloced; | |
81356b1f YL |
1082 | hdev->htqp[i].q.desc_num = kinfo->num_desc; |
1083 | kinfo->tqp[alloced] = &hdev->htqp[i].q; | |
46a3df9f | 1084 | hdev->htqp[i].alloced = true; |
46a3df9f S |
1085 | alloced++; |
1086 | } | |
1087 | } | |
81356b1f | 1088 | vport->alloc_tqps = kinfo->num_tqps; |
46a3df9f S |
1089 | |
1090 | return 0; | |
1091 | } | |
1092 | ||
81356b1f YL |
1093 | static int hclge_knic_setup(struct hclge_vport *vport, |
1094 | u16 num_tqps, u16 num_desc) | |
46a3df9f S |
1095 | { |
1096 | struct hnae3_handle *nic = &vport->nic; | |
1097 | struct hnae3_knic_private_info *kinfo = &nic->kinfo; | |
1098 | struct hclge_dev *hdev = vport->back; | |
1099 | int i, ret; | |
1100 | ||
81356b1f | 1101 | kinfo->num_desc = num_desc; |
46a3df9f S |
1102 | kinfo->rx_buf_len = hdev->rx_buf_len; |
1103 | kinfo->num_tc = min_t(u16, num_tqps, hdev->tm_info.num_tc); | |
1104 | kinfo->rss_size | |
1105 | = min_t(u16, hdev->rss_size_max, num_tqps / kinfo->num_tc); | |
1106 | kinfo->num_tqps = kinfo->rss_size * kinfo->num_tc; | |
1107 | ||
1108 | for (i = 0; i < HNAE3_MAX_TC; i++) { | |
1109 | if (hdev->hw_tc_map & BIT(i)) { | |
1110 | kinfo->tc_info[i].enable = true; | |
1111 | kinfo->tc_info[i].tqp_offset = i * kinfo->rss_size; | |
1112 | kinfo->tc_info[i].tqp_count = kinfo->rss_size; | |
1113 | kinfo->tc_info[i].tc = i; | |
1114 | } else { | |
1115 | /* Set to default queue if TC is disable */ | |
1116 | kinfo->tc_info[i].enable = false; | |
1117 | kinfo->tc_info[i].tqp_offset = 0; | |
1118 | kinfo->tc_info[i].tqp_count = 1; | |
1119 | kinfo->tc_info[i].tc = 0; | |
1120 | } | |
1121 | } | |
1122 | ||
1123 | kinfo->tqp = devm_kcalloc(&hdev->pdev->dev, kinfo->num_tqps, | |
1124 | sizeof(struct hnae3_queue *), GFP_KERNEL); | |
1125 | if (!kinfo->tqp) | |
1126 | return -ENOMEM; | |
1127 | ||
81356b1f | 1128 | ret = hclge_assign_tqp(vport); |
90415e85 | 1129 | if (ret) |
46a3df9f | 1130 | dev_err(&hdev->pdev->dev, "fail to assign TQPs %d.\n", ret); |
46a3df9f | 1131 | |
90415e85 | 1132 | return ret; |
46a3df9f S |
1133 | } |
1134 | ||
7df7dad6 L |
1135 | static int hclge_map_tqp_to_vport(struct hclge_dev *hdev, |
1136 | struct hclge_vport *vport) | |
1137 | { | |
1138 | struct hnae3_handle *nic = &vport->nic; | |
1139 | struct hnae3_knic_private_info *kinfo; | |
1140 | u16 i; | |
1141 | ||
1142 | kinfo = &nic->kinfo; | |
1143 | for (i = 0; i < kinfo->num_tqps; i++) { | |
1144 | struct hclge_tqp *q = | |
1145 | container_of(kinfo->tqp[i], struct hclge_tqp, q); | |
1146 | bool is_pf; | |
1147 | int ret; | |
1148 | ||
1149 | is_pf = !(vport->vport_id); | |
1150 | ret = hclge_map_tqps_to_func(hdev, vport->vport_id, q->index, | |
1151 | i, is_pf); | |
1152 | if (ret) | |
1153 | return ret; | |
1154 | } | |
1155 | ||
1156 | return 0; | |
1157 | } | |
1158 | ||
1159 | static int hclge_map_tqp(struct hclge_dev *hdev) | |
1160 | { | |
1161 | struct hclge_vport *vport = hdev->vport; | |
1162 | u16 i, num_vport; | |
1163 | ||
1164 | num_vport = hdev->num_vmdq_vport + hdev->num_req_vfs + 1; | |
1165 | for (i = 0; i < num_vport; i++) { | |
1166 | int ret; | |
1167 | ||
1168 | ret = hclge_map_tqp_to_vport(hdev, vport); | |
1169 | if (ret) | |
1170 | return ret; | |
1171 | ||
1172 | vport++; | |
1173 | } | |
1174 | ||
1175 | return 0; | |
1176 | } | |
1177 | ||
46a3df9f S |
1178 | static void hclge_unic_setup(struct hclge_vport *vport, u16 num_tqps) |
1179 | { | |
1180 | /* this would be initialized later */ | |
1181 | } | |
1182 | ||
1183 | static int hclge_vport_setup(struct hclge_vport *vport, u16 num_tqps) | |
1184 | { | |
1185 | struct hnae3_handle *nic = &vport->nic; | |
1186 | struct hclge_dev *hdev = vport->back; | |
1187 | int ret; | |
1188 | ||
1189 | nic->pdev = hdev->pdev; | |
1190 | nic->ae_algo = &ae_algo; | |
1191 | nic->numa_node_mask = hdev->numa_node_mask; | |
1192 | ||
1193 | if (hdev->ae_dev->dev_type == HNAE3_DEV_KNIC) { | |
81356b1f | 1194 | ret = hclge_knic_setup(vport, num_tqps, hdev->num_desc); |
46a3df9f S |
1195 | if (ret) { |
1196 | dev_err(&hdev->pdev->dev, "knic setup failed %d\n", | |
1197 | ret); | |
1198 | return ret; | |
1199 | } | |
1200 | } else { | |
1201 | hclge_unic_setup(vport, num_tqps); | |
1202 | } | |
1203 | ||
1204 | return 0; | |
1205 | } | |
1206 | ||
1207 | static int hclge_alloc_vport(struct hclge_dev *hdev) | |
1208 | { | |
1209 | struct pci_dev *pdev = hdev->pdev; | |
1210 | struct hclge_vport *vport; | |
1211 | u32 tqp_main_vport; | |
1212 | u32 tqp_per_vport; | |
1213 | int num_vport, i; | |
1214 | int ret; | |
1215 | ||
1216 | /* We need to alloc a vport for main NIC of PF */ | |
1217 | num_vport = hdev->num_vmdq_vport + hdev->num_req_vfs + 1; | |
1218 | ||
b76edfb2 HT |
1219 | if (hdev->num_tqps < num_vport) { |
1220 | dev_err(&hdev->pdev->dev, "tqps(%d) is less than vports(%d)", | |
1221 | hdev->num_tqps, num_vport); | |
1222 | return -EINVAL; | |
1223 | } | |
46a3df9f S |
1224 | |
1225 | /* Alloc the same number of TQPs for every vport */ | |
1226 | tqp_per_vport = hdev->num_tqps / num_vport; | |
1227 | tqp_main_vport = tqp_per_vport + hdev->num_tqps % num_vport; | |
1228 | ||
1229 | vport = devm_kcalloc(&pdev->dev, num_vport, sizeof(struct hclge_vport), | |
1230 | GFP_KERNEL); | |
1231 | if (!vport) | |
1232 | return -ENOMEM; | |
1233 | ||
1234 | hdev->vport = vport; | |
1235 | hdev->num_alloc_vport = num_vport; | |
1236 | ||
bc59f827 FL |
1237 | if (IS_ENABLED(CONFIG_PCI_IOV)) |
1238 | hdev->num_alloc_vfs = hdev->num_req_vfs; | |
46a3df9f S |
1239 | |
1240 | for (i = 0; i < num_vport; i++) { | |
1241 | vport->back = hdev; | |
1242 | vport->vport_id = i; | |
b2c04029 | 1243 | vport->mps = HCLGE_MAC_DEFAULT_FRAME; |
46a3df9f S |
1244 | |
1245 | if (i == 0) | |
1246 | ret = hclge_vport_setup(vport, tqp_main_vport); | |
1247 | else | |
1248 | ret = hclge_vport_setup(vport, tqp_per_vport); | |
1249 | if (ret) { | |
1250 | dev_err(&pdev->dev, | |
1251 | "vport setup failed for vport %d, %d\n", | |
1252 | i, ret); | |
1253 | return ret; | |
1254 | } | |
1255 | ||
1256 | vport++; | |
1257 | } | |
1258 | ||
1259 | return 0; | |
1260 | } | |
1261 | ||
acf61ecd YL |
1262 | static int hclge_cmd_alloc_tx_buff(struct hclge_dev *hdev, |
1263 | struct hclge_pkt_buf_alloc *buf_alloc) | |
46a3df9f S |
1264 | { |
1265 | /* TX buffer size is unit by 128 byte */ | |
1266 | #define HCLGE_BUF_SIZE_UNIT_SHIFT 7 | |
1267 | #define HCLGE_BUF_SIZE_UPDATE_EN_MSK BIT(15) | |
d44f9b63 | 1268 | struct hclge_tx_buff_alloc_cmd *req; |
46a3df9f S |
1269 | struct hclge_desc desc; |
1270 | int ret; | |
1271 | u8 i; | |
1272 | ||
d44f9b63 | 1273 | req = (struct hclge_tx_buff_alloc_cmd *)desc.data; |
46a3df9f S |
1274 | |
1275 | hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TX_BUFF_ALLOC, 0); | |
9ffe79a9 | 1276 | for (i = 0; i < HCLGE_TC_NUM; i++) { |
acf61ecd | 1277 | u32 buf_size = buf_alloc->priv_buf[i].tx_buf_size; |
9ffe79a9 | 1278 | |
46a3df9f S |
1279 | req->tx_pkt_buff[i] = |
1280 | cpu_to_le16((buf_size >> HCLGE_BUF_SIZE_UNIT_SHIFT) | | |
1281 | HCLGE_BUF_SIZE_UPDATE_EN_MSK); | |
9ffe79a9 | 1282 | } |
46a3df9f S |
1283 | |
1284 | ret = hclge_cmd_send(&hdev->hw, &desc, 1); | |
90415e85 | 1285 | if (ret) |
46a3df9f S |
1286 | dev_err(&hdev->pdev->dev, "tx buffer alloc cmd failed %d.\n", |
1287 | ret); | |
46a3df9f | 1288 | |
90415e85 | 1289 | return ret; |
46a3df9f S |
1290 | } |
1291 | ||
acf61ecd YL |
1292 | static int hclge_tx_buffer_alloc(struct hclge_dev *hdev, |
1293 | struct hclge_pkt_buf_alloc *buf_alloc) | |
46a3df9f | 1294 | { |
acf61ecd | 1295 | int ret = hclge_cmd_alloc_tx_buff(hdev, buf_alloc); |
46a3df9f | 1296 | |
90415e85 JS |
1297 | if (ret) |
1298 | dev_err(&hdev->pdev->dev, "tx buffer alloc failed %d\n", ret); | |
46a3df9f | 1299 | |
90415e85 | 1300 | return ret; |
46a3df9f S |
1301 | } |
1302 | ||
1303 | static int hclge_get_tc_num(struct hclge_dev *hdev) | |
1304 | { | |
1305 | int i, cnt = 0; | |
1306 | ||
1307 | for (i = 0; i < HCLGE_MAX_TC_NUM; i++) | |
1308 | if (hdev->hw_tc_map & BIT(i)) | |
1309 | cnt++; | |
1310 | return cnt; | |
1311 | } | |
1312 | ||
1313 | static int hclge_get_pfc_enalbe_num(struct hclge_dev *hdev) | |
1314 | { | |
1315 | int i, cnt = 0; | |
1316 | ||
1317 | for (i = 0; i < HCLGE_MAX_TC_NUM; i++) | |
1318 | if (hdev->hw_tc_map & BIT(i) && | |
1319 | hdev->tm_info.hw_pfc_map & BIT(i)) | |
1320 | cnt++; | |
1321 | return cnt; | |
1322 | } | |
1323 | ||
1324 | /* Get the number of pfc enabled TCs, which have private buffer */ | |
acf61ecd YL |
1325 | static int hclge_get_pfc_priv_num(struct hclge_dev *hdev, |
1326 | struct hclge_pkt_buf_alloc *buf_alloc) | |
46a3df9f S |
1327 | { |
1328 | struct hclge_priv_buf *priv; | |
1329 | int i, cnt = 0; | |
1330 | ||
1331 | for (i = 0; i < HCLGE_MAX_TC_NUM; i++) { | |
acf61ecd | 1332 | priv = &buf_alloc->priv_buf[i]; |
46a3df9f S |
1333 | if ((hdev->tm_info.hw_pfc_map & BIT(i)) && |
1334 | priv->enable) | |
1335 | cnt++; | |
1336 | } | |
1337 | ||
1338 | return cnt; | |
1339 | } | |
1340 | ||
1341 | /* Get the number of pfc disabled TCs, which have private buffer */ | |
acf61ecd YL |
1342 | static int hclge_get_no_pfc_priv_num(struct hclge_dev *hdev, |
1343 | struct hclge_pkt_buf_alloc *buf_alloc) | |
46a3df9f S |
1344 | { |
1345 | struct hclge_priv_buf *priv; | |
1346 | int i, cnt = 0; | |
1347 | ||
1348 | for (i = 0; i < HCLGE_MAX_TC_NUM; i++) { | |
acf61ecd | 1349 | priv = &buf_alloc->priv_buf[i]; |
46a3df9f S |
1350 | if (hdev->hw_tc_map & BIT(i) && |
1351 | !(hdev->tm_info.hw_pfc_map & BIT(i)) && | |
1352 | priv->enable) | |
1353 | cnt++; | |
1354 | } | |
1355 | ||
1356 | return cnt; | |
1357 | } | |
1358 | ||
acf61ecd | 1359 | static u32 hclge_get_rx_priv_buff_alloced(struct hclge_pkt_buf_alloc *buf_alloc) |
46a3df9f S |
1360 | { |
1361 | struct hclge_priv_buf *priv; | |
1362 | u32 rx_priv = 0; | |
1363 | int i; | |
1364 | ||
1365 | for (i = 0; i < HCLGE_MAX_TC_NUM; i++) { | |
acf61ecd | 1366 | priv = &buf_alloc->priv_buf[i]; |
46a3df9f S |
1367 | if (priv->enable) |
1368 | rx_priv += priv->buf_size; | |
1369 | } | |
1370 | return rx_priv; | |
1371 | } | |
1372 | ||
acf61ecd | 1373 | static u32 hclge_get_tx_buff_alloced(struct hclge_pkt_buf_alloc *buf_alloc) |
9ffe79a9 YL |
1374 | { |
1375 | u32 i, total_tx_size = 0; | |
1376 | ||
1377 | for (i = 0; i < HCLGE_MAX_TC_NUM; i++) | |
acf61ecd | 1378 | total_tx_size += buf_alloc->priv_buf[i].tx_buf_size; |
9ffe79a9 YL |
1379 | |
1380 | return total_tx_size; | |
1381 | } | |
1382 | ||
acf61ecd YL |
1383 | static bool hclge_is_rx_buf_ok(struct hclge_dev *hdev, |
1384 | struct hclge_pkt_buf_alloc *buf_alloc, | |
1385 | u32 rx_all) | |
46a3df9f S |
1386 | { |
1387 | u32 shared_buf_min, shared_buf_tc, shared_std; | |
1388 | int tc_num, pfc_enable_num; | |
89b4e1bb | 1389 | u32 shared_buf, aligned_mps; |
46a3df9f S |
1390 | u32 rx_priv; |
1391 | int i; | |
1392 | ||
1393 | tc_num = hclge_get_tc_num(hdev); | |
1394 | pfc_enable_num = hclge_get_pfc_enalbe_num(hdev); | |
89b4e1bb | 1395 | aligned_mps = roundup(hdev->mps, HCLGE_BUF_SIZE_UNIT); |
46a3df9f | 1396 | |
d221df4e | 1397 | if (hnae3_dev_dcb_supported(hdev)) |
89b4e1bb | 1398 | shared_buf_min = 2 * aligned_mps + hdev->dv_buf_size; |
d221df4e | 1399 | else |
89b4e1bb | 1400 | shared_buf_min = aligned_mps + HCLGE_NON_DCB_ADDITIONAL_BUF |
cb799ea5 | 1401 | + hdev->dv_buf_size; |
d221df4e | 1402 | |
89b4e1bb YL |
1403 | shared_buf_tc = pfc_enable_num * aligned_mps + |
1404 | (tc_num - pfc_enable_num) * aligned_mps / 2 + | |
1405 | aligned_mps; | |
aa771075 YL |
1406 | shared_std = roundup(max_t(u32, shared_buf_min, shared_buf_tc), |
1407 | HCLGE_BUF_SIZE_UNIT); | |
46a3df9f | 1408 | |
acf61ecd | 1409 | rx_priv = hclge_get_rx_priv_buff_alloced(buf_alloc); |
aa771075 | 1410 | if (rx_all < rx_priv + shared_std) |
46a3df9f S |
1411 | return false; |
1412 | ||
89b4e1bb | 1413 | shared_buf = rounddown(rx_all - rx_priv, HCLGE_BUF_SIZE_UNIT); |
acf61ecd | 1414 | buf_alloc->s_buf.buf_size = shared_buf; |
cb799ea5 YL |
1415 | if (hnae3_dev_dcb_supported(hdev)) { |
1416 | buf_alloc->s_buf.self.high = shared_buf - hdev->dv_buf_size; | |
1417 | buf_alloc->s_buf.self.low = buf_alloc->s_buf.self.high | |
89b4e1bb | 1418 | - roundup(aligned_mps / 2, HCLGE_BUF_SIZE_UNIT); |
cb799ea5 | 1419 | } else { |
89b4e1bb | 1420 | buf_alloc->s_buf.self.high = aligned_mps + |
cb799ea5 | 1421 | HCLGE_NON_DCB_ADDITIONAL_BUF; |
89b4e1bb YL |
1422 | buf_alloc->s_buf.self.low = |
1423 | roundup(aligned_mps / 2, HCLGE_BUF_SIZE_UNIT); | |
cb799ea5 | 1424 | } |
46a3df9f S |
1425 | |
1426 | for (i = 0; i < HCLGE_MAX_TC_NUM; i++) { | |
1427 | if ((hdev->hw_tc_map & BIT(i)) && | |
1428 | (hdev->tm_info.hw_pfc_map & BIT(i))) { | |
89b4e1bb YL |
1429 | buf_alloc->s_buf.tc_thrd[i].low = aligned_mps; |
1430 | buf_alloc->s_buf.tc_thrd[i].high = 2 * aligned_mps; | |
46a3df9f | 1431 | } else { |
acf61ecd | 1432 | buf_alloc->s_buf.tc_thrd[i].low = 0; |
89b4e1bb | 1433 | buf_alloc->s_buf.tc_thrd[i].high = aligned_mps; |
46a3df9f S |
1434 | } |
1435 | } | |
1436 | ||
1437 | return true; | |
1438 | } | |
1439 | ||
acf61ecd YL |
1440 | static int hclge_tx_buffer_calc(struct hclge_dev *hdev, |
1441 | struct hclge_pkt_buf_alloc *buf_alloc) | |
9ffe79a9 YL |
1442 | { |
1443 | u32 i, total_size; | |
1444 | ||
1445 | total_size = hdev->pkt_buf_size; | |
1446 | ||
1447 | /* alloc tx buffer for all enabled tc */ | |
1448 | for (i = 0; i < HCLGE_MAX_TC_NUM; i++) { | |
acf61ecd | 1449 | struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i]; |
9ffe79a9 | 1450 | |
cb799ea5 | 1451 | if (total_size < hdev->tx_buf_size) |
9ffe79a9 YL |
1452 | return -ENOMEM; |
1453 | ||
1454 | if (hdev->hw_tc_map & BIT(i)) | |
cb799ea5 | 1455 | priv->tx_buf_size = hdev->tx_buf_size; |
9ffe79a9 YL |
1456 | else |
1457 | priv->tx_buf_size = 0; | |
1458 | ||
1459 | total_size -= priv->tx_buf_size; | |
1460 | } | |
1461 | ||
1462 | return 0; | |
1463 | } | |
1464 | ||
46a3df9f S |
1465 | /* hclge_rx_buffer_calc: calculate the rx private buffer size for all TCs |
1466 | * @hdev: pointer to struct hclge_dev | |
acf61ecd | 1467 | * @buf_alloc: pointer to buffer calculation data |
46a3df9f S |
1468 | * @return: 0: calculate sucessful, negative: fail |
1469 | */ | |
1db9b1bf YL |
1470 | static int hclge_rx_buffer_calc(struct hclge_dev *hdev, |
1471 | struct hclge_pkt_buf_alloc *buf_alloc) | |
46a3df9f | 1472 | { |
d748274d | 1473 | u32 rx_all = hdev->pkt_buf_size, aligned_mps; |
46a3df9f S |
1474 | int no_pfc_priv_num, pfc_priv_num; |
1475 | struct hclge_priv_buf *priv; | |
1476 | int i; | |
1477 | ||
d748274d | 1478 | aligned_mps = round_up(hdev->mps, HCLGE_BUF_SIZE_UNIT); |
acf61ecd | 1479 | rx_all -= hclge_get_tx_buff_alloced(buf_alloc); |
9ffe79a9 | 1480 | |
d602a525 YL |
1481 | /* When DCB is not supported, rx private |
1482 | * buffer is not allocated. | |
1483 | */ | |
1484 | if (!hnae3_dev_dcb_supported(hdev)) { | |
acf61ecd | 1485 | if (!hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all)) |
d602a525 YL |
1486 | return -ENOMEM; |
1487 | ||
1488 | return 0; | |
1489 | } | |
1490 | ||
46a3df9f S |
1491 | /* step 1, try to alloc private buffer for all enabled tc */ |
1492 | for (i = 0; i < HCLGE_MAX_TC_NUM; i++) { | |
acf61ecd | 1493 | priv = &buf_alloc->priv_buf[i]; |
46a3df9f S |
1494 | if (hdev->hw_tc_map & BIT(i)) { |
1495 | priv->enable = 1; | |
1496 | if (hdev->tm_info.hw_pfc_map & BIT(i)) { | |
d748274d | 1497 | priv->wl.low = aligned_mps; |
89b4e1bb YL |
1498 | priv->wl.high = |
1499 | roundup(priv->wl.low + aligned_mps, | |
1500 | HCLGE_BUF_SIZE_UNIT); | |
46a3df9f | 1501 | priv->buf_size = priv->wl.high + |
89b4e1bb | 1502 | hdev->dv_buf_size; |
46a3df9f S |
1503 | } else { |
1504 | priv->wl.low = 0; | |
d748274d | 1505 | priv->wl.high = 2 * aligned_mps; |
cb799ea5 YL |
1506 | priv->buf_size = priv->wl.high + |
1507 | hdev->dv_buf_size; | |
46a3df9f | 1508 | } |
bb1fe9ea YL |
1509 | } else { |
1510 | priv->enable = 0; | |
1511 | priv->wl.low = 0; | |
1512 | priv->wl.high = 0; | |
1513 | priv->buf_size = 0; | |
46a3df9f S |
1514 | } |
1515 | } | |
1516 | ||
acf61ecd | 1517 | if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all)) |
46a3df9f S |
1518 | return 0; |
1519 | ||
1520 | /* step 2, try to decrease the buffer size of | |
1521 | * no pfc TC's private buffer | |
1522 | */ | |
1523 | for (i = 0; i < HCLGE_MAX_TC_NUM; i++) { | |
acf61ecd | 1524 | priv = &buf_alloc->priv_buf[i]; |
46a3df9f | 1525 | |
bb1fe9ea YL |
1526 | priv->enable = 0; |
1527 | priv->wl.low = 0; | |
1528 | priv->wl.high = 0; | |
1529 | priv->buf_size = 0; | |
1530 | ||
1531 | if (!(hdev->hw_tc_map & BIT(i))) | |
1532 | continue; | |
1533 | ||
1534 | priv->enable = 1; | |
46a3df9f S |
1535 | |
1536 | if (hdev->tm_info.hw_pfc_map & BIT(i)) { | |
89b4e1bb | 1537 | priv->wl.low = 256; |
d748274d | 1538 | priv->wl.high = priv->wl.low + aligned_mps; |
cb799ea5 | 1539 | priv->buf_size = priv->wl.high + hdev->dv_buf_size; |
46a3df9f S |
1540 | } else { |
1541 | priv->wl.low = 0; | |
d748274d | 1542 | priv->wl.high = aligned_mps; |
cb799ea5 | 1543 | priv->buf_size = priv->wl.high + hdev->dv_buf_size; |
46a3df9f S |
1544 | } |
1545 | } | |
1546 | ||
acf61ecd | 1547 | if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all)) |
46a3df9f S |
1548 | return 0; |
1549 | ||
1550 | /* step 3, try to reduce the number of pfc disabled TCs, | |
1551 | * which have private buffer | |
1552 | */ | |
1553 | /* get the total no pfc enable TC number, which have private buffer */ | |
acf61ecd | 1554 | no_pfc_priv_num = hclge_get_no_pfc_priv_num(hdev, buf_alloc); |
46a3df9f S |
1555 | |
1556 | /* let the last to be cleared first */ | |
1557 | for (i = HCLGE_MAX_TC_NUM - 1; i >= 0; i--) { | |
acf61ecd | 1558 | priv = &buf_alloc->priv_buf[i]; |
46a3df9f S |
1559 | |
1560 | if (hdev->hw_tc_map & BIT(i) && | |
1561 | !(hdev->tm_info.hw_pfc_map & BIT(i))) { | |
1562 | /* Clear the no pfc TC private buffer */ | |
1563 | priv->wl.low = 0; | |
1564 | priv->wl.high = 0; | |
1565 | priv->buf_size = 0; | |
1566 | priv->enable = 0; | |
1567 | no_pfc_priv_num--; | |
1568 | } | |
1569 | ||
acf61ecd | 1570 | if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all) || |
46a3df9f S |
1571 | no_pfc_priv_num == 0) |
1572 | break; | |
1573 | } | |
1574 | ||
acf61ecd | 1575 | if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all)) |
46a3df9f S |
1576 | return 0; |
1577 | ||
1578 | /* step 4, try to reduce the number of pfc enabled TCs | |
1579 | * which have private buffer. | |
1580 | */ | |
acf61ecd | 1581 | pfc_priv_num = hclge_get_pfc_priv_num(hdev, buf_alloc); |
46a3df9f S |
1582 | |
1583 | /* let the last to be cleared first */ | |
1584 | for (i = HCLGE_MAX_TC_NUM - 1; i >= 0; i--) { | |
acf61ecd | 1585 | priv = &buf_alloc->priv_buf[i]; |
46a3df9f S |
1586 | |
1587 | if (hdev->hw_tc_map & BIT(i) && | |
1588 | hdev->tm_info.hw_pfc_map & BIT(i)) { | |
1589 | /* Reduce the number of pfc TC with private buffer */ | |
1590 | priv->wl.low = 0; | |
1591 | priv->enable = 0; | |
1592 | priv->wl.high = 0; | |
1593 | priv->buf_size = 0; | |
1594 | pfc_priv_num--; | |
1595 | } | |
1596 | ||
acf61ecd | 1597 | if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all) || |
46a3df9f S |
1598 | pfc_priv_num == 0) |
1599 | break; | |
1600 | } | |
acf61ecd | 1601 | if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all)) |
46a3df9f S |
1602 | return 0; |
1603 | ||
1604 | return -ENOMEM; | |
1605 | } | |
1606 | ||
acf61ecd YL |
1607 | static int hclge_rx_priv_buf_alloc(struct hclge_dev *hdev, |
1608 | struct hclge_pkt_buf_alloc *buf_alloc) | |
46a3df9f | 1609 | { |
d44f9b63 | 1610 | struct hclge_rx_priv_buff_cmd *req; |
46a3df9f S |
1611 | struct hclge_desc desc; |
1612 | int ret; | |
1613 | int i; | |
1614 | ||
1615 | hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_PRIV_BUFF_ALLOC, false); | |
d44f9b63 | 1616 | req = (struct hclge_rx_priv_buff_cmd *)desc.data; |
46a3df9f S |
1617 | |
1618 | /* Alloc private buffer TCs */ | |
1619 | for (i = 0; i < HCLGE_MAX_TC_NUM; i++) { | |
acf61ecd | 1620 | struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i]; |
46a3df9f S |
1621 | |
1622 | req->buf_num[i] = | |
1623 | cpu_to_le16(priv->buf_size >> HCLGE_BUF_UNIT_S); | |
1624 | req->buf_num[i] |= | |
5bca3b94 | 1625 | cpu_to_le16(1 << HCLGE_TC0_PRI_BUF_EN_B); |
46a3df9f S |
1626 | } |
1627 | ||
b8c8bf47 | 1628 | req->shared_buf = |
acf61ecd | 1629 | cpu_to_le16((buf_alloc->s_buf.buf_size >> HCLGE_BUF_UNIT_S) | |
b8c8bf47 YL |
1630 | (1 << HCLGE_TC0_PRI_BUF_EN_B)); |
1631 | ||
46a3df9f | 1632 | ret = hclge_cmd_send(&hdev->hw, &desc, 1); |
90415e85 | 1633 | if (ret) |
46a3df9f S |
1634 | dev_err(&hdev->pdev->dev, |
1635 | "rx private buffer alloc cmd failed %d\n", ret); | |
46a3df9f | 1636 | |
90415e85 | 1637 | return ret; |
46a3df9f S |
1638 | } |
1639 | ||
acf61ecd YL |
1640 | static int hclge_rx_priv_wl_config(struct hclge_dev *hdev, |
1641 | struct hclge_pkt_buf_alloc *buf_alloc) | |
46a3df9f S |
1642 | { |
1643 | struct hclge_rx_priv_wl_buf *req; | |
1644 | struct hclge_priv_buf *priv; | |
1645 | struct hclge_desc desc[2]; | |
1646 | int i, j; | |
1647 | int ret; | |
1648 | ||
1649 | for (i = 0; i < 2; i++) { | |
1650 | hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_RX_PRIV_WL_ALLOC, | |
1651 | false); | |
1652 | req = (struct hclge_rx_priv_wl_buf *)desc[i].data; | |
1653 | ||
1654 | /* The first descriptor set the NEXT bit to 1 */ | |
1655 | if (i == 0) | |
1656 | desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT); | |
1657 | else | |
1658 | desc[i].flag &= ~cpu_to_le16(HCLGE_CMD_FLAG_NEXT); | |
1659 | ||
1660 | for (j = 0; j < HCLGE_TC_NUM_ONE_DESC; j++) { | |
acf61ecd YL |
1661 | u32 idx = i * HCLGE_TC_NUM_ONE_DESC + j; |
1662 | ||
1663 | priv = &buf_alloc->priv_buf[idx]; | |
46a3df9f S |
1664 | req->tc_wl[j].high = |
1665 | cpu_to_le16(priv->wl.high >> HCLGE_BUF_UNIT_S); | |
1666 | req->tc_wl[j].high |= | |
ee6b549b | 1667 | cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B)); |
46a3df9f S |
1668 | req->tc_wl[j].low = |
1669 | cpu_to_le16(priv->wl.low >> HCLGE_BUF_UNIT_S); | |
1670 | req->tc_wl[j].low |= | |
ee6b549b | 1671 | cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B)); |
46a3df9f S |
1672 | } |
1673 | } | |
1674 | ||
1675 | /* Send 2 descriptor at one time */ | |
1676 | ret = hclge_cmd_send(&hdev->hw, desc, 2); | |
90415e85 | 1677 | if (ret) |
46a3df9f S |
1678 | dev_err(&hdev->pdev->dev, |
1679 | "rx private waterline config cmd failed %d\n", | |
1680 | ret); | |
90415e85 | 1681 | return ret; |
46a3df9f S |
1682 | } |
1683 | ||
acf61ecd YL |
1684 | static int hclge_common_thrd_config(struct hclge_dev *hdev, |
1685 | struct hclge_pkt_buf_alloc *buf_alloc) | |
46a3df9f | 1686 | { |
acf61ecd | 1687 | struct hclge_shared_buf *s_buf = &buf_alloc->s_buf; |
46a3df9f S |
1688 | struct hclge_rx_com_thrd *req; |
1689 | struct hclge_desc desc[2]; | |
1690 | struct hclge_tc_thrd *tc; | |
1691 | int i, j; | |
1692 | int ret; | |
1693 | ||
1694 | for (i = 0; i < 2; i++) { | |
1695 | hclge_cmd_setup_basic_desc(&desc[i], | |
1696 | HCLGE_OPC_RX_COM_THRD_ALLOC, false); | |
1697 | req = (struct hclge_rx_com_thrd *)&desc[i].data; | |
1698 | ||
1699 | /* The first descriptor set the NEXT bit to 1 */ | |
1700 | if (i == 0) | |
1701 | desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT); | |
1702 | else | |
1703 | desc[i].flag &= ~cpu_to_le16(HCLGE_CMD_FLAG_NEXT); | |
1704 | ||
1705 | for (j = 0; j < HCLGE_TC_NUM_ONE_DESC; j++) { | |
1706 | tc = &s_buf->tc_thrd[i * HCLGE_TC_NUM_ONE_DESC + j]; | |
1707 | ||
1708 | req->com_thrd[j].high = | |
1709 | cpu_to_le16(tc->high >> HCLGE_BUF_UNIT_S); | |
1710 | req->com_thrd[j].high |= | |
ee6b549b | 1711 | cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B)); |
46a3df9f S |
1712 | req->com_thrd[j].low = |
1713 | cpu_to_le16(tc->low >> HCLGE_BUF_UNIT_S); | |
1714 | req->com_thrd[j].low |= | |
ee6b549b | 1715 | cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B)); |
46a3df9f S |
1716 | } |
1717 | } | |
1718 | ||
1719 | /* Send 2 descriptors at one time */ | |
1720 | ret = hclge_cmd_send(&hdev->hw, desc, 2); | |
90415e85 | 1721 | if (ret) |
46a3df9f S |
1722 | dev_err(&hdev->pdev->dev, |
1723 | "common threshold config cmd failed %d\n", ret); | |
90415e85 | 1724 | return ret; |
46a3df9f S |
1725 | } |
1726 | ||
acf61ecd YL |
1727 | static int hclge_common_wl_config(struct hclge_dev *hdev, |
1728 | struct hclge_pkt_buf_alloc *buf_alloc) | |
46a3df9f | 1729 | { |
acf61ecd | 1730 | struct hclge_shared_buf *buf = &buf_alloc->s_buf; |
46a3df9f S |
1731 | struct hclge_rx_com_wl *req; |
1732 | struct hclge_desc desc; | |
1733 | int ret; | |
1734 | ||
1735 | hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_COM_WL_ALLOC, false); | |
1736 | ||
1737 | req = (struct hclge_rx_com_wl *)desc.data; | |
1738 | req->com_wl.high = cpu_to_le16(buf->self.high >> HCLGE_BUF_UNIT_S); | |
ee6b549b | 1739 | req->com_wl.high |= cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B)); |
46a3df9f S |
1740 | |
1741 | req->com_wl.low = cpu_to_le16(buf->self.low >> HCLGE_BUF_UNIT_S); | |
ee6b549b | 1742 | req->com_wl.low |= cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B)); |
46a3df9f S |
1743 | |
1744 | ret = hclge_cmd_send(&hdev->hw, &desc, 1); | |
90415e85 | 1745 | if (ret) |
46a3df9f S |
1746 | dev_err(&hdev->pdev->dev, |
1747 | "common waterline config cmd failed %d\n", ret); | |
930ff2f6 | 1748 | |
90415e85 | 1749 | return ret; |
46a3df9f S |
1750 | } |
1751 | ||
1752 | int hclge_buffer_alloc(struct hclge_dev *hdev) | |
1753 | { | |
acf61ecd | 1754 | struct hclge_pkt_buf_alloc *pkt_buf; |
46a3df9f S |
1755 | int ret; |
1756 | ||
acf61ecd YL |
1757 | pkt_buf = kzalloc(sizeof(*pkt_buf), GFP_KERNEL); |
1758 | if (!pkt_buf) | |
46a3df9f S |
1759 | return -ENOMEM; |
1760 | ||
acf61ecd | 1761 | ret = hclge_tx_buffer_calc(hdev, pkt_buf); |
9ffe79a9 YL |
1762 | if (ret) { |
1763 | dev_err(&hdev->pdev->dev, | |
1764 | "could not calc tx buffer size for all TCs %d\n", ret); | |
acf61ecd | 1765 | goto out; |
9ffe79a9 YL |
1766 | } |
1767 | ||
acf61ecd | 1768 | ret = hclge_tx_buffer_alloc(hdev, pkt_buf); |
46a3df9f S |
1769 | if (ret) { |
1770 | dev_err(&hdev->pdev->dev, | |
1771 | "could not alloc tx buffers %d\n", ret); | |
acf61ecd | 1772 | goto out; |
46a3df9f S |
1773 | } |
1774 | ||
acf61ecd | 1775 | ret = hclge_rx_buffer_calc(hdev, pkt_buf); |
46a3df9f S |
1776 | if (ret) { |
1777 | dev_err(&hdev->pdev->dev, | |
1778 | "could not calc rx priv buffer size for all TCs %d\n", | |
1779 | ret); | |
acf61ecd | 1780 | goto out; |
46a3df9f S |
1781 | } |
1782 | ||
acf61ecd | 1783 | ret = hclge_rx_priv_buf_alloc(hdev, pkt_buf); |
46a3df9f S |
1784 | if (ret) { |
1785 | dev_err(&hdev->pdev->dev, "could not alloc rx priv buffer %d\n", | |
1786 | ret); | |
acf61ecd | 1787 | goto out; |
46a3df9f S |
1788 | } |
1789 | ||
2daf4a65 | 1790 | if (hnae3_dev_dcb_supported(hdev)) { |
acf61ecd | 1791 | ret = hclge_rx_priv_wl_config(hdev, pkt_buf); |
2daf4a65 YL |
1792 | if (ret) { |
1793 | dev_err(&hdev->pdev->dev, | |
1794 | "could not configure rx private waterline %d\n", | |
1795 | ret); | |
acf61ecd | 1796 | goto out; |
2daf4a65 | 1797 | } |
46a3df9f | 1798 | |
acf61ecd | 1799 | ret = hclge_common_thrd_config(hdev, pkt_buf); |
2daf4a65 YL |
1800 | if (ret) { |
1801 | dev_err(&hdev->pdev->dev, | |
1802 | "could not configure common threshold %d\n", | |
1803 | ret); | |
acf61ecd | 1804 | goto out; |
2daf4a65 | 1805 | } |
46a3df9f S |
1806 | } |
1807 | ||
acf61ecd YL |
1808 | ret = hclge_common_wl_config(hdev, pkt_buf); |
1809 | if (ret) | |
46a3df9f S |
1810 | dev_err(&hdev->pdev->dev, |
1811 | "could not configure common waterline %d\n", ret); | |
46a3df9f | 1812 | |
acf61ecd YL |
1813 | out: |
1814 | kfree(pkt_buf); | |
1815 | return ret; | |
46a3df9f S |
1816 | } |
1817 | ||
1818 | static int hclge_init_roce_base_info(struct hclge_vport *vport) | |
1819 | { | |
1820 | struct hnae3_handle *roce = &vport->roce; | |
1821 | struct hnae3_handle *nic = &vport->nic; | |
1822 | ||
887c3820 | 1823 | roce->rinfo.num_vectors = vport->back->num_roce_msi; |
46a3df9f S |
1824 | |
1825 | if (vport->back->num_msi_left < vport->roce.rinfo.num_vectors || | |
1826 | vport->back->num_msi_left == 0) | |
1827 | return -EINVAL; | |
1828 | ||
1829 | roce->rinfo.base_vector = vport->back->roce_base_vector; | |
1830 | ||
1831 | roce->rinfo.netdev = nic->kinfo.netdev; | |
1832 | roce->rinfo.roce_io_base = vport->back->hw.io_base; | |
1833 | ||
1834 | roce->pdev = nic->pdev; | |
1835 | roce->ae_algo = nic->ae_algo; | |
1836 | roce->numa_node_mask = nic->numa_node_mask; | |
1837 | ||
1838 | return 0; | |
1839 | } | |
1840 | ||
887c3820 | 1841 | static int hclge_init_msi(struct hclge_dev *hdev) |
46a3df9f S |
1842 | { |
1843 | struct pci_dev *pdev = hdev->pdev; | |
887c3820 SM |
1844 | int vectors; |
1845 | int i; | |
46a3df9f | 1846 | |
887c3820 SM |
1847 | vectors = pci_alloc_irq_vectors(pdev, 1, hdev->num_msi, |
1848 | PCI_IRQ_MSI | PCI_IRQ_MSIX); | |
1849 | if (vectors < 0) { | |
1850 | dev_err(&pdev->dev, | |
1851 | "failed(%d) to allocate MSI/MSI-X vectors\n", | |
1852 | vectors); | |
1853 | return vectors; | |
46a3df9f | 1854 | } |
887c3820 SM |
1855 | if (vectors < hdev->num_msi) |
1856 | dev_warn(&hdev->pdev->dev, | |
1857 | "requested %d MSI/MSI-X, but allocated %d MSI/MSI-X\n", | |
1858 | hdev->num_msi, vectors); | |
46a3df9f | 1859 | |
887c3820 SM |
1860 | hdev->num_msi = vectors; |
1861 | hdev->num_msi_left = vectors; | |
1862 | hdev->base_msi_vector = pdev->irq; | |
46a3df9f | 1863 | hdev->roce_base_vector = hdev->base_msi_vector + |
5355e6d3 | 1864 | hdev->roce_base_msix_offset; |
46a3df9f | 1865 | |
46a3df9f S |
1866 | hdev->vector_status = devm_kcalloc(&pdev->dev, hdev->num_msi, |
1867 | sizeof(u16), GFP_KERNEL); | |
887c3820 SM |
1868 | if (!hdev->vector_status) { |
1869 | pci_free_irq_vectors(pdev); | |
46a3df9f | 1870 | return -ENOMEM; |
887c3820 | 1871 | } |
46a3df9f S |
1872 | |
1873 | for (i = 0; i < hdev->num_msi; i++) | |
1874 | hdev->vector_status[i] = HCLGE_INVALID_VPORT; | |
1875 | ||
887c3820 SM |
1876 | hdev->vector_irq = devm_kcalloc(&pdev->dev, hdev->num_msi, |
1877 | sizeof(int), GFP_KERNEL); | |
1878 | if (!hdev->vector_irq) { | |
1879 | pci_free_irq_vectors(pdev); | |
1880 | return -ENOMEM; | |
46a3df9f | 1881 | } |
46a3df9f S |
1882 | |
1883 | return 0; | |
1884 | } | |
1885 | ||
1c780066 | 1886 | static u8 hclge_check_speed_dup(u8 duplex, int speed) |
46a3df9f | 1887 | { |
46a3df9f | 1888 | |
1c780066 YL |
1889 | if (!(speed == HCLGE_MAC_SPEED_10M || speed == HCLGE_MAC_SPEED_100M)) |
1890 | duplex = HCLGE_MAC_FULL; | |
46a3df9f | 1891 | |
1c780066 | 1892 | return duplex; |
46a3df9f S |
1893 | } |
1894 | ||
1c780066 YL |
1895 | static int hclge_cfg_mac_speed_dup_hw(struct hclge_dev *hdev, int speed, |
1896 | u8 duplex) | |
46a3df9f | 1897 | { |
d44f9b63 | 1898 | struct hclge_config_mac_speed_dup_cmd *req; |
46a3df9f S |
1899 | struct hclge_desc desc; |
1900 | int ret; | |
1901 | ||
d44f9b63 | 1902 | req = (struct hclge_config_mac_speed_dup_cmd *)desc.data; |
46a3df9f S |
1903 | |
1904 | hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_SPEED_DUP, false); | |
1905 | ||
ccc23ef3 | 1906 | hnae3_set_bit(req->speed_dup, HCLGE_CFG_DUPLEX_B, !!duplex); |
46a3df9f S |
1907 | |
1908 | switch (speed) { | |
1909 | case HCLGE_MAC_SPEED_10M: | |
ccc23ef3 PL |
1910 | hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M, |
1911 | HCLGE_CFG_SPEED_S, 6); | |
46a3df9f S |
1912 | break; |
1913 | case HCLGE_MAC_SPEED_100M: | |
ccc23ef3 PL |
1914 | hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M, |
1915 | HCLGE_CFG_SPEED_S, 7); | |
46a3df9f S |
1916 | break; |
1917 | case HCLGE_MAC_SPEED_1G: | |
ccc23ef3 PL |
1918 | hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M, |
1919 | HCLGE_CFG_SPEED_S, 0); | |
46a3df9f S |
1920 | break; |
1921 | case HCLGE_MAC_SPEED_10G: | |
ccc23ef3 PL |
1922 | hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M, |
1923 | HCLGE_CFG_SPEED_S, 1); | |
46a3df9f S |
1924 | break; |
1925 | case HCLGE_MAC_SPEED_25G: | |
ccc23ef3 PL |
1926 | hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M, |
1927 | HCLGE_CFG_SPEED_S, 2); | |
46a3df9f S |
1928 | break; |
1929 | case HCLGE_MAC_SPEED_40G: | |
ccc23ef3 PL |
1930 | hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M, |
1931 | HCLGE_CFG_SPEED_S, 3); | |
46a3df9f S |
1932 | break; |
1933 | case HCLGE_MAC_SPEED_50G: | |
ccc23ef3 PL |
1934 | hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M, |
1935 | HCLGE_CFG_SPEED_S, 4); | |
46a3df9f S |
1936 | break; |
1937 | case HCLGE_MAC_SPEED_100G: | |
ccc23ef3 PL |
1938 | hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M, |
1939 | HCLGE_CFG_SPEED_S, 5); | |
46a3df9f S |
1940 | break; |
1941 | default: | |
d7629e74 | 1942 | dev_err(&hdev->pdev->dev, "invalid speed (%d)\n", speed); |
46a3df9f S |
1943 | return -EINVAL; |
1944 | } | |
1945 | ||
ccc23ef3 PL |
1946 | hnae3_set_bit(req->mac_change_fec_en, HCLGE_CFG_MAC_SPEED_CHANGE_EN_B, |
1947 | 1); | |
46a3df9f S |
1948 | |
1949 | ret = hclge_cmd_send(&hdev->hw, &desc, 1); | |
1950 | if (ret) { | |
1951 | dev_err(&hdev->pdev->dev, | |
1952 | "mac speed/duplex config cmd failed %d.\n", ret); | |
1953 | return ret; | |
1954 | } | |
1955 | ||
1c780066 YL |
1956 | return 0; |
1957 | } | |
1958 | ||
1959 | int hclge_cfg_mac_speed_dup(struct hclge_dev *hdev, int speed, u8 duplex) | |
1960 | { | |
1961 | int ret; | |
1962 | ||
1963 | duplex = hclge_check_speed_dup(duplex, speed); | |
1964 | if (hdev->hw.mac.speed == speed && hdev->hw.mac.duplex == duplex) | |
1965 | return 0; | |
1966 | ||
1967 | ret = hclge_cfg_mac_speed_dup_hw(hdev, speed, duplex); | |
1968 | if (ret) | |
1969 | return ret; | |
1970 | ||
1971 | hdev->hw.mac.speed = speed; | |
1972 | hdev->hw.mac.duplex = duplex; | |
46a3df9f S |
1973 | |
1974 | return 0; | |
1975 | } | |
1976 | ||
1977 | static int hclge_cfg_mac_speed_dup_h(struct hnae3_handle *handle, int speed, | |
1978 | u8 duplex) | |
1979 | { | |
1980 | struct hclge_vport *vport = hclge_get_vport(handle); | |
1981 | struct hclge_dev *hdev = vport->back; | |
1982 | ||
1983 | return hclge_cfg_mac_speed_dup(hdev, speed, duplex); | |
1984 | } | |
1985 | ||
46a3df9f S |
1986 | static int hclge_set_autoneg_en(struct hclge_dev *hdev, bool enable) |
1987 | { | |
d44f9b63 | 1988 | struct hclge_config_auto_neg_cmd *req; |
46a3df9f | 1989 | struct hclge_desc desc; |
a90bb9a5 | 1990 | u32 flag = 0; |
46a3df9f S |
1991 | int ret; |
1992 | ||
1993 | hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_AN_MODE, false); | |
1994 | ||
d44f9b63 | 1995 | req = (struct hclge_config_auto_neg_cmd *)desc.data; |
ccc23ef3 | 1996 | hnae3_set_bit(flag, HCLGE_MAC_CFG_AN_EN_B, !!enable); |
a90bb9a5 | 1997 | req->cfg_an_cmd_flag = cpu_to_le32(flag); |
46a3df9f S |
1998 | |
1999 | ret = hclge_cmd_send(&hdev->hw, &desc, 1); | |
90415e85 | 2000 | if (ret) |
46a3df9f S |
2001 | dev_err(&hdev->pdev->dev, "auto neg set cmd failed %d.\n", |
2002 | ret); | |
46a3df9f | 2003 | |
90415e85 | 2004 | return ret; |
46a3df9f S |
2005 | } |
2006 | ||
2007 | static int hclge_set_autoneg(struct hnae3_handle *handle, bool enable) | |
2008 | { | |
2009 | struct hclge_vport *vport = hclge_get_vport(handle); | |
2010 | struct hclge_dev *hdev = vport->back; | |
2011 | ||
2012 | return hclge_set_autoneg_en(hdev, enable); | |
2013 | } | |
2014 | ||
2015 | static int hclge_get_autoneg(struct hnae3_handle *handle) | |
2016 | { | |
2017 | struct hclge_vport *vport = hclge_get_vport(handle); | |
2018 | struct hclge_dev *hdev = vport->back; | |
9ff804ee FL |
2019 | struct phy_device *phydev = hdev->hw.mac.phydev; |
2020 | ||
2021 | if (phydev) | |
2022 | return phydev->autoneg; | |
46a3df9f S |
2023 | |
2024 | return hdev->hw.mac.autoneg; | |
2025 | } | |
2026 | ||
2027 | static int hclge_mac_init(struct hclge_dev *hdev) | |
2028 | { | |
2029 | struct hclge_mac *mac = &hdev->hw.mac; | |
2030 | int ret; | |
2031 | ||
0daa6e88 | 2032 | hdev->support_sfp_query = true; |
1c780066 YL |
2033 | hdev->hw.mac.duplex = HCLGE_MAC_FULL; |
2034 | ret = hclge_cfg_mac_speed_dup_hw(hdev, hdev->hw.mac.speed, | |
2035 | hdev->hw.mac.duplex); | |
46a3df9f S |
2036 | if (ret) { |
2037 | dev_err(&hdev->pdev->dev, | |
2038 | "Config mac speed dup fail ret=%d\n", ret); | |
2039 | return ret; | |
2040 | } | |
2041 | ||
2042 | mac->link = 0; | |
2043 | ||
4ee09281 YL |
2044 | ret = hclge_set_mac_mtu(hdev, hdev->mps); |
2045 | if (ret) { | |
2046 | dev_err(&hdev->pdev->dev, "set mtu failed ret=%d\n", ret); | |
2047 | return ret; | |
2048 | } | |
59bc85ec | 2049 | |
4ee09281 | 2050 | ret = hclge_buffer_alloc(hdev); |
90415e85 | 2051 | if (ret) |
59bc85ec | 2052 | dev_err(&hdev->pdev->dev, |
4ee09281 | 2053 | "allocate buffer fail, ret=%d\n", ret); |
59bc85ec | 2054 | |
90415e85 | 2055 | return ret; |
46a3df9f S |
2056 | } |
2057 | ||
22fd3468 SM |
2058 | static void hclge_mbx_task_schedule(struct hclge_dev *hdev) |
2059 | { | |
2060 | if (!test_and_set_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state)) | |
2061 | schedule_work(&hdev->mbx_service_task); | |
2062 | } | |
2063 | ||
ed4a1bb8 SM |
2064 | static void hclge_reset_task_schedule(struct hclge_dev *hdev) |
2065 | { | |
2066 | if (!test_and_set_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state)) | |
2067 | schedule_work(&hdev->rst_service_task); | |
2068 | } | |
2069 | ||
46a3df9f S |
2070 | static void hclge_task_schedule(struct hclge_dev *hdev) |
2071 | { | |
2072 | if (!test_bit(HCLGE_STATE_DOWN, &hdev->state) && | |
2073 | !test_bit(HCLGE_STATE_REMOVING, &hdev->state) && | |
2074 | !test_and_set_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state)) | |
2075 | (void)schedule_work(&hdev->service_task); | |
2076 | } | |
2077 | ||
2078 | static int hclge_get_mac_link_status(struct hclge_dev *hdev) | |
2079 | { | |
d44f9b63 | 2080 | struct hclge_link_status_cmd *req; |
46a3df9f S |
2081 | struct hclge_desc desc; |
2082 | int link_status; | |
2083 | int ret; | |
2084 | ||
2085 | hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_LINK_STATUS, true); | |
2086 | ret = hclge_cmd_send(&hdev->hw, &desc, 1); | |
2087 | if (ret) { | |
2088 | dev_err(&hdev->pdev->dev, "get link status cmd failed %d\n", | |
2089 | ret); | |
2090 | return ret; | |
2091 | } | |
2092 | ||
d44f9b63 | 2093 | req = (struct hclge_link_status_cmd *)desc.data; |
e23e21ea | 2094 | link_status = req->status & HCLGE_LINK_STATUS_UP_M; |
46a3df9f S |
2095 | |
2096 | return !!link_status; | |
2097 | } | |
2098 | ||
2099 | static int hclge_get_mac_phy_link(struct hclge_dev *hdev) | |
2100 | { | |
2101 | int mac_state; | |
2102 | int link_stat; | |
2103 | ||
ed6acb33 PL |
2104 | if (test_bit(HCLGE_STATE_DOWN, &hdev->state)) |
2105 | return 0; | |
2106 | ||
46a3df9f S |
2107 | mac_state = hclge_get_mac_link_status(hdev); |
2108 | ||
2109 | if (hdev->hw.mac.phydev) { | |
7ce8e698 | 2110 | if (hdev->hw.mac.phydev->state == PHY_RUNNING) |
46a3df9f S |
2111 | link_stat = mac_state & |
2112 | hdev->hw.mac.phydev->link; | |
2113 | else | |
2114 | link_stat = 0; | |
2115 | ||
2116 | } else { | |
2117 | link_stat = mac_state; | |
2118 | } | |
2119 | ||
2120 | return !!link_stat; | |
2121 | } | |
2122 | ||
2123 | static void hclge_update_link_status(struct hclge_dev *hdev) | |
2124 | { | |
2125 | struct hnae3_client *client = hdev->nic_client; | |
2126 | struct hnae3_handle *handle; | |
2127 | int state; | |
2128 | int i; | |
2129 | ||
2130 | if (!client) | |
2131 | return; | |
2132 | state = hclge_get_mac_phy_link(hdev); | |
2133 | if (state != hdev->hw.mac.link) { | |
2134 | for (i = 0; i < hdev->num_vmdq_vport + 1; i++) { | |
2135 | handle = &hdev->vport[i].nic; | |
2136 | client->ops->link_status_change(handle, state); | |
2137 | } | |
2138 | hdev->hw.mac.link = state; | |
2139 | } | |
2140 | } | |
2141 | ||
0daa6e88 PL |
2142 | static int hclge_get_sfp_speed(struct hclge_dev *hdev, u32 *speed) |
2143 | { | |
2144 | struct hclge_sfp_speed_cmd *resp = NULL; | |
2145 | struct hclge_desc desc; | |
2146 | int ret; | |
2147 | ||
2148 | hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SFP_GET_SPEED, true); | |
2149 | resp = (struct hclge_sfp_speed_cmd *)desc.data; | |
2150 | ret = hclge_cmd_send(&hdev->hw, &desc, 1); | |
2151 | if (ret == -EOPNOTSUPP) { | |
2152 | dev_warn(&hdev->pdev->dev, | |
2153 | "IMP do not support get SFP speed %d\n", ret); | |
2154 | return ret; | |
2155 | } else if (ret) { | |
2156 | dev_err(&hdev->pdev->dev, "get sfp speed failed %d\n", ret); | |
2157 | return ret; | |
2158 | } | |
2159 | ||
2160 | *speed = resp->sfp_speed; | |
2161 | ||
2162 | return 0; | |
2163 | } | |
2164 | ||
46a3df9f S |
2165 | static int hclge_update_speed_duplex(struct hclge_dev *hdev) |
2166 | { | |
2167 | struct hclge_mac mac = hdev->hw.mac; | |
46a3df9f S |
2168 | int speed; |
2169 | int ret; | |
2170 | ||
0daa6e88 | 2171 | /* get the speed from SFP cmd when phy |
46a3df9f S |
2172 | * doesn't exit. |
2173 | */ | |
0daa6e88 | 2174 | if (mac.phydev) |
46a3df9f S |
2175 | return 0; |
2176 | ||
0daa6e88 PL |
2177 | /* if IMP does not support get SFP/qSFP speed, return directly */ |
2178 | if (!hdev->support_sfp_query) | |
2179 | return 0; | |
46a3df9f | 2180 | |
0daa6e88 PL |
2181 | ret = hclge_get_sfp_speed(hdev, &speed); |
2182 | if (ret == -EOPNOTSUPP) { | |
2183 | hdev->support_sfp_query = false; | |
2184 | return ret; | |
2185 | } else if (ret) { | |
1c780066 | 2186 | return ret; |
46a3df9f S |
2187 | } |
2188 | ||
0daa6e88 PL |
2189 | if (speed == HCLGE_MAC_SPEED_UNKNOWN) |
2190 | return 0; /* do nothing if no SFP */ | |
2191 | ||
2192 | /* must config full duplex for SFP */ | |
2193 | return hclge_cfg_mac_speed_dup(hdev, speed, HCLGE_MAC_FULL); | |
46a3df9f S |
2194 | } |
2195 | ||
2196 | static int hclge_update_speed_duplex_h(struct hnae3_handle *handle) | |
2197 | { | |
2198 | struct hclge_vport *vport = hclge_get_vport(handle); | |
2199 | struct hclge_dev *hdev = vport->back; | |
2200 | ||
2201 | return hclge_update_speed_duplex(hdev); | |
2202 | } | |
2203 | ||
2204 | static int hclge_get_status(struct hnae3_handle *handle) | |
2205 | { | |
2206 | struct hclge_vport *vport = hclge_get_vport(handle); | |
2207 | struct hclge_dev *hdev = vport->back; | |
2208 | ||
2209 | hclge_update_link_status(hdev); | |
2210 | ||
2211 | return hdev->hw.mac.link; | |
2212 | } | |
2213 | ||
d039ef68 | 2214 | static void hclge_service_timer(struct timer_list *t) |
46a3df9f | 2215 | { |
d039ef68 | 2216 | struct hclge_dev *hdev = from_timer(hdev, t, service_timer); |
46a3df9f | 2217 | |
d039ef68 | 2218 | mod_timer(&hdev->service_timer, jiffies + HZ); |
7a5d2a39 | 2219 | hdev->hw_stats.stats_timer++; |
46a3df9f S |
2220 | hclge_task_schedule(hdev); |
2221 | } | |
2222 | ||
2223 | static void hclge_service_complete(struct hclge_dev *hdev) | |
2224 | { | |
2225 | WARN_ON(!test_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state)); | |
2226 | ||
2227 | /* Flush memory before next watchdog */ | |
2228 | smp_mb__before_atomic(); | |
2229 | clear_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state); | |
2230 | } | |
2231 | ||
202f2014 SM |
2232 | static u32 hclge_check_event_cause(struct hclge_dev *hdev, u32 *clearval) |
2233 | { | |
00029070 | 2234 | u32 rst_src_reg, cmdq_src_reg, msix_src_reg; |
202f2014 SM |
2235 | |
2236 | /* fetch the events from their corresponding regs */ | |
0bcc9ba1 | 2237 | rst_src_reg = hclge_read_dev(&hdev->hw, HCLGE_MISC_VECTOR_INT_STS); |
22fd3468 | 2238 | cmdq_src_reg = hclge_read_dev(&hdev->hw, HCLGE_VECTOR0_CMDQ_SRC_REG); |
00029070 SM |
2239 | msix_src_reg = hclge_read_dev(&hdev->hw, |
2240 | HCLGE_VECTOR0_PF_OTHER_INT_STS_REG); | |
22fd3468 SM |
2241 | |
2242 | /* Assumption: If by any chance reset and mailbox events are reported | |
2243 | * together then we will only process reset event in this go and will | |
2244 | * defer the processing of the mailbox events. Since, we would have not | |
2245 | * cleared RX CMDQ event this time we would receive again another | |
2246 | * interrupt from H/W just for the mailbox. | |
2247 | */ | |
202f2014 SM |
2248 | |
2249 | /* check for vector0 reset event sources */ | |
de2eae69 HT |
2250 | if (BIT(HCLGE_VECTOR0_IMPRESET_INT_B) & rst_src_reg) { |
2251 | dev_info(&hdev->pdev->dev, "IMP reset interrupt\n"); | |
2252 | set_bit(HNAE3_IMP_RESET, &hdev->reset_pending); | |
2253 | set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state); | |
2254 | *clearval = BIT(HCLGE_VECTOR0_IMPRESET_INT_B); | |
2255 | return HCLGE_VECTOR0_EVENT_RST; | |
2256 | } | |
2257 | ||
202f2014 | 2258 | if (BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B) & rst_src_reg) { |
1afdb53a | 2259 | dev_info(&hdev->pdev->dev, "global reset interrupt\n"); |
7edef4ce | 2260 | set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state); |
202f2014 SM |
2261 | set_bit(HNAE3_GLOBAL_RESET, &hdev->reset_pending); |
2262 | *clearval = BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B); | |
2263 | return HCLGE_VECTOR0_EVENT_RST; | |
2264 | } | |
2265 | ||
2266 | if (BIT(HCLGE_VECTOR0_CORERESET_INT_B) & rst_src_reg) { | |
1afdb53a | 2267 | dev_info(&hdev->pdev->dev, "core reset interrupt\n"); |
7edef4ce | 2268 | set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state); |
202f2014 SM |
2269 | set_bit(HNAE3_CORE_RESET, &hdev->reset_pending); |
2270 | *clearval = BIT(HCLGE_VECTOR0_CORERESET_INT_B); | |
2271 | return HCLGE_VECTOR0_EVENT_RST; | |
2272 | } | |
2273 | ||
00029070 SM |
2274 | /* check for vector0 msix event source */ |
2275 | if (msix_src_reg & HCLGE_VECTOR0_REG_MSIX_MASK) | |
2276 | return HCLGE_VECTOR0_EVENT_ERR; | |
2277 | ||
22fd3468 SM |
2278 | /* check for vector0 mailbox(=CMDQ RX) event source */ |
2279 | if (BIT(HCLGE_VECTOR0_RX_CMDQ_INT_B) & cmdq_src_reg) { | |
2280 | cmdq_src_reg &= ~BIT(HCLGE_VECTOR0_RX_CMDQ_INT_B); | |
2281 | *clearval = cmdq_src_reg; | |
2282 | return HCLGE_VECTOR0_EVENT_MBX; | |
2283 | } | |
202f2014 SM |
2284 | |
2285 | return HCLGE_VECTOR0_EVENT_OTHER; | |
2286 | } | |
2287 | ||
2288 | static void hclge_clear_event_cause(struct hclge_dev *hdev, u32 event_type, | |
2289 | u32 regclr) | |
2290 | { | |
22fd3468 SM |
2291 | switch (event_type) { |
2292 | case HCLGE_VECTOR0_EVENT_RST: | |
202f2014 | 2293 | hclge_write_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG, regclr); |
22fd3468 SM |
2294 | break; |
2295 | case HCLGE_VECTOR0_EVENT_MBX: | |
2296 | hclge_write_dev(&hdev->hw, HCLGE_VECTOR0_CMDQ_SRC_REG, regclr); | |
2297 | break; | |
085920ba JS |
2298 | default: |
2299 | break; | |
22fd3468 | 2300 | } |
202f2014 SM |
2301 | } |
2302 | ||
9ab4ad14 XW |
2303 | static void hclge_clear_all_event_cause(struct hclge_dev *hdev) |
2304 | { | |
2305 | hclge_clear_event_cause(hdev, HCLGE_VECTOR0_EVENT_RST, | |
2306 | BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B) | | |
2307 | BIT(HCLGE_VECTOR0_CORERESET_INT_B) | | |
2308 | BIT(HCLGE_VECTOR0_IMPRESET_INT_B)); | |
2309 | hclge_clear_event_cause(hdev, HCLGE_VECTOR0_EVENT_MBX, 0); | |
2310 | } | |
2311 | ||
466b0c00 L |
2312 | static void hclge_enable_vector(struct hclge_misc_vector *vector, bool enable) |
2313 | { | |
2314 | writel(enable ? 1 : 0, vector->addr); | |
2315 | } | |
2316 | ||
2317 | static irqreturn_t hclge_misc_irq_handle(int irq, void *data) | |
2318 | { | |
2319 | struct hclge_dev *hdev = data; | |
202f2014 SM |
2320 | u32 event_cause; |
2321 | u32 clearval; | |
466b0c00 L |
2322 | |
2323 | hclge_enable_vector(&hdev->misc_vector, false); | |
202f2014 SM |
2324 | event_cause = hclge_check_event_cause(hdev, &clearval); |
2325 | ||
22fd3468 | 2326 | /* vector 0 interrupt is shared with reset and mailbox source events.*/ |
202f2014 | 2327 | switch (event_cause) { |
00029070 SM |
2328 | case HCLGE_VECTOR0_EVENT_ERR: |
2329 | /* we do not know what type of reset is required now. This could | |
2330 | * only be decided after we fetch the type of errors which | |
2331 | * caused this event. Therefore, we will do below for now: | |
2332 | * 1. Assert HNAE3_UNKNOWN_RESET type of reset. This means we | |
2333 | * have defered type of reset to be used. | |
2334 | * 2. Schedule the reset serivce task. | |
2335 | * 3. When service task receives HNAE3_UNKNOWN_RESET type it | |
2336 | * will fetch the correct type of reset. This would be done | |
2337 | * by first decoding the types of errors. | |
2338 | */ | |
2339 | set_bit(HNAE3_UNKNOWN_RESET, &hdev->reset_request); | |
2340 | /* fall through */ | |
202f2014 | 2341 | case HCLGE_VECTOR0_EVENT_RST: |
ed4a1bb8 | 2342 | hclge_reset_task_schedule(hdev); |
202f2014 | 2343 | break; |
22fd3468 SM |
2344 | case HCLGE_VECTOR0_EVENT_MBX: |
2345 | /* If we are here then, | |
2346 | * 1. Either we are not handling any mbx task and we are not | |
2347 | * scheduled as well | |
2348 | * OR | |
2349 | * 2. We could be handling a mbx task but nothing more is | |
2350 | * scheduled. | |
2351 | * In both cases, we should schedule mbx task as there are more | |
2352 | * mbx messages reported by this interrupt. | |
2353 | */ | |
2354 | hclge_mbx_task_schedule(hdev); | |
40ee4b71 | 2355 | break; |
202f2014 | 2356 | default: |
40ee4b71 YL |
2357 | dev_warn(&hdev->pdev->dev, |
2358 | "received unknown or unhandled event of vector0\n"); | |
202f2014 SM |
2359 | break; |
2360 | } | |
2361 | ||
e9a50d09 | 2362 | /* clear the source of interrupt if it is not cause by reset */ |
c9fc48dc | 2363 | if (event_cause == HCLGE_VECTOR0_EVENT_MBX) { |
e9a50d09 YL |
2364 | hclge_clear_event_cause(hdev, event_cause, clearval); |
2365 | hclge_enable_vector(&hdev->misc_vector, true); | |
2366 | } | |
466b0c00 L |
2367 | |
2368 | return IRQ_HANDLED; | |
2369 | } | |
2370 | ||
2371 | static void hclge_free_vector(struct hclge_dev *hdev, int vector_id) | |
2372 | { | |
1dc5378f PL |
2373 | if (hdev->vector_status[vector_id] == HCLGE_INVALID_VPORT) { |
2374 | dev_warn(&hdev->pdev->dev, | |
2375 | "vector(vector_id %d) has been freed.\n", vector_id); | |
2376 | return; | |
2377 | } | |
2378 | ||
466b0c00 L |
2379 | hdev->vector_status[vector_id] = HCLGE_INVALID_VPORT; |
2380 | hdev->num_msi_left += 1; | |
2381 | hdev->num_msi_used -= 1; | |
2382 | } | |
2383 | ||
2384 | static void hclge_get_misc_vector(struct hclge_dev *hdev) | |
2385 | { | |
2386 | struct hclge_misc_vector *vector = &hdev->misc_vector; | |
2387 | ||
2388 | vector->vector_irq = pci_irq_vector(hdev->pdev, 0); | |
2389 | ||
2390 | vector->addr = hdev->hw.io_base + HCLGE_MISC_VECTOR_REG_BASE; | |
2391 | hdev->vector_status[0] = 0; | |
2392 | ||
2393 | hdev->num_msi_left -= 1; | |
2394 | hdev->num_msi_used += 1; | |
2395 | } | |
2396 | ||
2397 | static int hclge_misc_irq_init(struct hclge_dev *hdev) | |
2398 | { | |
2399 | int ret; | |
2400 | ||
2401 | hclge_get_misc_vector(hdev); | |
2402 | ||
202f2014 SM |
2403 | /* this would be explicitly freed in the end */ |
2404 | ret = request_irq(hdev->misc_vector.vector_irq, hclge_misc_irq_handle, | |
2405 | 0, "hclge_misc", hdev); | |
466b0c00 L |
2406 | if (ret) { |
2407 | hclge_free_vector(hdev, 0); | |
2408 | dev_err(&hdev->pdev->dev, "request misc irq(%d) fail\n", | |
2409 | hdev->misc_vector.vector_irq); | |
2410 | } | |
2411 | ||
2412 | return ret; | |
2413 | } | |
2414 | ||
202f2014 SM |
2415 | static void hclge_misc_irq_uninit(struct hclge_dev *hdev) |
2416 | { | |
2417 | free_irq(hdev->misc_vector.vector_irq, hdev); | |
2418 | hclge_free_vector(hdev, 0); | |
2419 | } | |
2420 | ||
4ed340ab L |
2421 | static int hclge_notify_client(struct hclge_dev *hdev, |
2422 | enum hnae3_reset_notify_type type) | |
2423 | { | |
2424 | struct hnae3_client *client = hdev->nic_client; | |
2425 | u16 i; | |
2426 | ||
2427 | if (!client->ops->reset_notify) | |
2428 | return -EOPNOTSUPP; | |
2429 | ||
2430 | for (i = 0; i < hdev->num_vmdq_vport + 1; i++) { | |
ad7c82fe | 2431 | struct hnae3_handle *handle = &hdev->vport[i].nic; |
2432 | int ret; | |
b38db544 | 2433 | |
4ed340ab | 2434 | ret = client->ops->reset_notify(handle, type); |
1afdb53a HT |
2435 | if (ret) { |
2436 | dev_err(&hdev->pdev->dev, | |
2437 | "notify nic client failed %d(%d)\n", type, ret); | |
4ed340ab | 2438 | return ret; |
1afdb53a | 2439 | } |
4ed340ab L |
2440 | } |
2441 | ||
6060dc84 | 2442 | return 0; |
4ed340ab L |
2443 | } |
2444 | ||
3db6b633 HT |
2445 | static int hclge_notify_roce_client(struct hclge_dev *hdev, |
2446 | enum hnae3_reset_notify_type type) | |
2447 | { | |
2448 | struct hnae3_client *client = hdev->roce_client; | |
2449 | int ret = 0; | |
2450 | u16 i; | |
2451 | ||
2452 | if (!client) | |
2453 | return 0; | |
2454 | ||
2455 | if (!client->ops->reset_notify) | |
2456 | return -EOPNOTSUPP; | |
2457 | ||
2458 | for (i = 0; i < hdev->num_vmdq_vport + 1; i++) { | |
2459 | struct hnae3_handle *handle = &hdev->vport[i].roce; | |
2460 | ||
2461 | ret = client->ops->reset_notify(handle, type); | |
2462 | if (ret) { | |
2463 | dev_err(&hdev->pdev->dev, | |
2464 | "notify roce client failed %d(%d)", | |
2465 | type, ret); | |
2466 | return ret; | |
2467 | } | |
2468 | } | |
2469 | ||
2470 | return ret; | |
2471 | } | |
2472 | ||
4ed340ab L |
2473 | static int hclge_reset_wait(struct hclge_dev *hdev) |
2474 | { | |
2475 | #define HCLGE_RESET_WATI_MS 100 | |
de2eae69 | 2476 | #define HCLGE_RESET_WAIT_CNT 200 |
4ed340ab L |
2477 | u32 val, reg, reg_bit; |
2478 | u32 cnt = 0; | |
2479 | ||
2480 | switch (hdev->reset_type) { | |
de2eae69 HT |
2481 | case HNAE3_IMP_RESET: |
2482 | reg = HCLGE_GLOBAL_RESET_REG; | |
2483 | reg_bit = HCLGE_IMP_RESET_BIT; | |
2484 | break; | |
4ed340ab L |
2485 | case HNAE3_GLOBAL_RESET: |
2486 | reg = HCLGE_GLOBAL_RESET_REG; | |
2487 | reg_bit = HCLGE_GLOBAL_RESET_BIT; | |
2488 | break; | |
2489 | case HNAE3_CORE_RESET: | |
2490 | reg = HCLGE_GLOBAL_RESET_REG; | |
2491 | reg_bit = HCLGE_CORE_RESET_BIT; | |
2492 | break; | |
2493 | case HNAE3_FUNC_RESET: | |
2494 | reg = HCLGE_FUN_RST_ING; | |
2495 | reg_bit = HCLGE_FUN_RST_ING_B; | |
2496 | break; | |
26977990 HT |
2497 | case HNAE3_FLR_RESET: |
2498 | break; | |
4ed340ab L |
2499 | default: |
2500 | dev_err(&hdev->pdev->dev, | |
2501 | "Wait for unsupported reset type: %d\n", | |
2502 | hdev->reset_type); | |
2503 | return -EINVAL; | |
2504 | } | |
2505 | ||
26977990 HT |
2506 | if (hdev->reset_type == HNAE3_FLR_RESET) { |
2507 | while (!test_bit(HNAE3_FLR_DONE, &hdev->flr_state) && | |
2508 | cnt++ < HCLGE_RESET_WAIT_CNT) | |
2509 | msleep(HCLGE_RESET_WATI_MS); | |
2510 | ||
2511 | if (!test_bit(HNAE3_FLR_DONE, &hdev->flr_state)) { | |
2512 | dev_err(&hdev->pdev->dev, | |
2513 | "flr wait timeout: %d\n", cnt); | |
2514 | return -EBUSY; | |
2515 | } | |
2516 | ||
2517 | return 0; | |
2518 | } | |
2519 | ||
4ed340ab | 2520 | val = hclge_read_dev(&hdev->hw, reg); |
ccc23ef3 | 2521 | while (hnae3_get_bit(val, reg_bit) && cnt < HCLGE_RESET_WAIT_CNT) { |
4ed340ab L |
2522 | msleep(HCLGE_RESET_WATI_MS); |
2523 | val = hclge_read_dev(&hdev->hw, reg); | |
2524 | cnt++; | |
2525 | } | |
2526 | ||
4ed340ab L |
2527 | if (cnt >= HCLGE_RESET_WAIT_CNT) { |
2528 | dev_warn(&hdev->pdev->dev, | |
2529 | "Wait for reset timeout: %d\n", hdev->reset_type); | |
2530 | return -EBUSY; | |
2531 | } | |
2532 | ||
2533 | return 0; | |
2534 | } | |
2535 | ||
7885e906 HT |
2536 | static int hclge_set_vf_rst(struct hclge_dev *hdev, int func_id, bool reset) |
2537 | { | |
2538 | struct hclge_vf_rst_cmd *req; | |
2539 | struct hclge_desc desc; | |
2540 | ||
2541 | req = (struct hclge_vf_rst_cmd *)desc.data; | |
2542 | hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GBL_RST_STATUS, false); | |
2543 | req->dest_vfid = func_id; | |
2544 | ||
2545 | if (reset) | |
2546 | req->vf_rst = 0x1; | |
2547 | ||
2548 | return hclge_cmd_send(&hdev->hw, &desc, 1); | |
2549 | } | |
2550 | ||
2551 | int hclge_set_all_vf_rst(struct hclge_dev *hdev, bool reset) | |
2552 | { | |
2553 | int i; | |
2554 | ||
2555 | for (i = hdev->num_vmdq_vport + 1; i < hdev->num_alloc_vport; i++) { | |
2556 | struct hclge_vport *vport = &hdev->vport[i]; | |
2557 | int ret; | |
2558 | ||
2559 | /* Send cmd to set/clear VF's FUNC_RST_ING */ | |
2560 | ret = hclge_set_vf_rst(hdev, vport->vport_id, reset); | |
2561 | if (ret) { | |
2562 | dev_err(&hdev->pdev->dev, | |
5f9c2a66 | 2563 | "set vf(%d) rst failed %d!\n", |
7885e906 HT |
2564 | vport->vport_id, ret); |
2565 | return ret; | |
2566 | } | |
2567 | ||
2568 | if (!reset) | |
2569 | continue; | |
2570 | ||
2571 | /* Inform VF to process the reset. | |
2572 | * hclge_inform_reset_assert_to_vf may fail if VF | |
2573 | * driver is not loaded. | |
2574 | */ | |
2575 | ret = hclge_inform_reset_assert_to_vf(vport); | |
2576 | if (ret) | |
2577 | dev_warn(&hdev->pdev->dev, | |
5f9c2a66 | 2578 | "inform reset to vf(%d) failed %d!\n", |
7885e906 HT |
2579 | vport->vport_id, ret); |
2580 | } | |
2581 | ||
2582 | return 0; | |
2583 | } | |
2584 | ||
13a86fae | 2585 | int hclge_func_reset_cmd(struct hclge_dev *hdev, int func_id) |
4ed340ab L |
2586 | { |
2587 | struct hclge_desc desc; | |
2588 | struct hclge_reset_cmd *req = (struct hclge_reset_cmd *)desc.data; | |
2589 | int ret; | |
2590 | ||
2591 | hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_RST_TRIGGER, false); | |
ccc23ef3 | 2592 | hnae3_set_bit(req->mac_func_reset, HCLGE_CFG_RESET_FUNC_B, 1); |
4ed340ab L |
2593 | req->fun_reset_vfid = func_id; |
2594 | ||
2595 | ret = hclge_cmd_send(&hdev->hw, &desc, 1); | |
2596 | if (ret) | |
2597 | dev_err(&hdev->pdev->dev, | |
2598 | "send function reset cmd fail, status =%d\n", ret); | |
2599 | ||
2600 | return ret; | |
2601 | } | |
2602 | ||
d5752031 | 2603 | static void hclge_do_reset(struct hclge_dev *hdev) |
4ed340ab L |
2604 | { |
2605 | struct pci_dev *pdev = hdev->pdev; | |
2606 | u32 val; | |
2607 | ||
d5752031 | 2608 | switch (hdev->reset_type) { |
4ed340ab L |
2609 | case HNAE3_GLOBAL_RESET: |
2610 | val = hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG); | |
ccc23ef3 | 2611 | hnae3_set_bit(val, HCLGE_GLOBAL_RESET_BIT, 1); |
4ed340ab L |
2612 | hclge_write_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG, val); |
2613 | dev_info(&pdev->dev, "Global Reset requested\n"); | |
2614 | break; | |
2615 | case HNAE3_CORE_RESET: | |
2616 | val = hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG); | |
ccc23ef3 | 2617 | hnae3_set_bit(val, HCLGE_CORE_RESET_BIT, 1); |
4ed340ab L |
2618 | hclge_write_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG, val); |
2619 | dev_info(&pdev->dev, "Core Reset requested\n"); | |
2620 | break; | |
2621 | case HNAE3_FUNC_RESET: | |
2622 | dev_info(&pdev->dev, "PF Reset requested\n"); | |
ed4a1bb8 SM |
2623 | /* schedule again to check later */ |
2624 | set_bit(HNAE3_FUNC_RESET, &hdev->reset_pending); | |
2625 | hclge_reset_task_schedule(hdev); | |
4ed340ab | 2626 | break; |
26977990 HT |
2627 | case HNAE3_FLR_RESET: |
2628 | dev_info(&pdev->dev, "FLR requested\n"); | |
2629 | /* schedule again to check later */ | |
2630 | set_bit(HNAE3_FLR_RESET, &hdev->reset_pending); | |
2631 | hclge_reset_task_schedule(hdev); | |
2632 | break; | |
4ed340ab L |
2633 | default: |
2634 | dev_warn(&pdev->dev, | |
d5752031 | 2635 | "Unsupported reset type: %d\n", hdev->reset_type); |
4ed340ab L |
2636 | break; |
2637 | } | |
2638 | } | |
2639 | ||
d5752031 SM |
2640 | static enum hnae3_reset_type hclge_get_reset_level(struct hclge_dev *hdev, |
2641 | unsigned long *addr) | |
2642 | { | |
2643 | enum hnae3_reset_type rst_level = HNAE3_NONE_RESET; | |
2644 | ||
00029070 SM |
2645 | /* first, resolve any unknown reset type to the known type(s) */ |
2646 | if (test_bit(HNAE3_UNKNOWN_RESET, addr)) { | |
2647 | /* we will intentionally ignore any errors from this function | |
2648 | * as we will end up in *some* reset request in any case | |
2649 | */ | |
2650 | hclge_handle_hw_msix_error(hdev, addr); | |
2651 | clear_bit(HNAE3_UNKNOWN_RESET, addr); | |
2652 | /* We defered the clearing of the error event which caused | |
2653 | * interrupt since it was not posssible to do that in | |
2654 | * interrupt context (and this is the reason we introduced | |
2655 | * new UNKNOWN reset type). Now, the errors have been | |
2656 | * handled and cleared in hardware we can safely enable | |
2657 | * interrupts. This is an exception to the norm. | |
2658 | */ | |
2659 | hclge_enable_vector(&hdev->misc_vector, true); | |
2660 | } | |
2661 | ||
d5752031 | 2662 | /* return the highest priority reset level amongst all */ |
62aff578 HT |
2663 | if (test_bit(HNAE3_IMP_RESET, addr)) { |
2664 | rst_level = HNAE3_IMP_RESET; | |
2665 | clear_bit(HNAE3_IMP_RESET, addr); | |
2666 | clear_bit(HNAE3_GLOBAL_RESET, addr); | |
2667 | clear_bit(HNAE3_CORE_RESET, addr); | |
2668 | clear_bit(HNAE3_FUNC_RESET, addr); | |
2669 | } else if (test_bit(HNAE3_GLOBAL_RESET, addr)) { | |
d5752031 | 2670 | rst_level = HNAE3_GLOBAL_RESET; |
62aff578 HT |
2671 | clear_bit(HNAE3_GLOBAL_RESET, addr); |
2672 | clear_bit(HNAE3_CORE_RESET, addr); | |
2673 | clear_bit(HNAE3_FUNC_RESET, addr); | |
2674 | } else if (test_bit(HNAE3_CORE_RESET, addr)) { | |
d5752031 | 2675 | rst_level = HNAE3_CORE_RESET; |
62aff578 HT |
2676 | clear_bit(HNAE3_CORE_RESET, addr); |
2677 | clear_bit(HNAE3_FUNC_RESET, addr); | |
2678 | } else if (test_bit(HNAE3_FUNC_RESET, addr)) { | |
d5752031 | 2679 | rst_level = HNAE3_FUNC_RESET; |
62aff578 | 2680 | clear_bit(HNAE3_FUNC_RESET, addr); |
26977990 HT |
2681 | } else if (test_bit(HNAE3_FLR_RESET, addr)) { |
2682 | rst_level = HNAE3_FLR_RESET; | |
2683 | clear_bit(HNAE3_FLR_RESET, addr); | |
62aff578 | 2684 | } |
d5752031 SM |
2685 | |
2686 | return rst_level; | |
2687 | } | |
2688 | ||
e9a50d09 YL |
2689 | static void hclge_clear_reset_cause(struct hclge_dev *hdev) |
2690 | { | |
2691 | u32 clearval = 0; | |
2692 | ||
2693 | switch (hdev->reset_type) { | |
2694 | case HNAE3_IMP_RESET: | |
2695 | clearval = BIT(HCLGE_VECTOR0_IMPRESET_INT_B); | |
2696 | break; | |
2697 | case HNAE3_GLOBAL_RESET: | |
2698 | clearval = BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B); | |
2699 | break; | |
2700 | case HNAE3_CORE_RESET: | |
2701 | clearval = BIT(HCLGE_VECTOR0_CORERESET_INT_B); | |
2702 | break; | |
2703 | default: | |
e9a50d09 YL |
2704 | break; |
2705 | } | |
2706 | ||
2707 | if (!clearval) | |
2708 | return; | |
2709 | ||
2710 | hclge_write_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG, clearval); | |
2711 | hclge_enable_vector(&hdev->misc_vector, true); | |
2712 | } | |
2713 | ||
7885e906 HT |
2714 | static int hclge_reset_prepare_down(struct hclge_dev *hdev) |
2715 | { | |
2716 | int ret = 0; | |
2717 | ||
2718 | switch (hdev->reset_type) { | |
2719 | case HNAE3_FUNC_RESET: | |
26977990 HT |
2720 | /* fall through */ |
2721 | case HNAE3_FLR_RESET: | |
7885e906 HT |
2722 | ret = hclge_set_all_vf_rst(hdev, true); |
2723 | break; | |
2724 | default: | |
2725 | break; | |
2726 | } | |
2727 | ||
2728 | return ret; | |
2729 | } | |
2730 | ||
48ac80db HT |
2731 | static int hclge_reset_prepare_wait(struct hclge_dev *hdev) |
2732 | { | |
de2eae69 | 2733 | u32 reg_val; |
48ac80db HT |
2734 | int ret = 0; |
2735 | ||
2736 | switch (hdev->reset_type) { | |
2737 | case HNAE3_FUNC_RESET: | |
7885e906 HT |
2738 | /* There is no mechanism for PF to know if VF has stopped IO |
2739 | * for now, just wait 100 ms for VF to stop IO | |
2740 | */ | |
2741 | msleep(100); | |
48ac80db HT |
2742 | ret = hclge_func_reset_cmd(hdev, 0); |
2743 | if (ret) { | |
2744 | dev_err(&hdev->pdev->dev, | |
7707c27b | 2745 | "asserting function reset fail %d!\n", ret); |
48ac80db HT |
2746 | return ret; |
2747 | } | |
2748 | ||
2749 | /* After performaning pf reset, it is not necessary to do the | |
2750 | * mailbox handling or send any command to firmware, because | |
2751 | * any mailbox handling or command to firmware is only valid | |
2752 | * after hclge_cmd_init is called. | |
2753 | */ | |
2754 | set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state); | |
2755 | break; | |
26977990 HT |
2756 | case HNAE3_FLR_RESET: |
2757 | /* There is no mechanism for PF to know if VF has stopped IO | |
2758 | * for now, just wait 100 ms for VF to stop IO | |
2759 | */ | |
2760 | msleep(100); | |
2761 | set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state); | |
2762 | set_bit(HNAE3_FLR_DOWN, &hdev->flr_state); | |
2763 | break; | |
de2eae69 HT |
2764 | case HNAE3_IMP_RESET: |
2765 | reg_val = hclge_read_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG); | |
2766 | hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG, | |
2767 | BIT(HCLGE_VECTOR0_IMP_RESET_INT_B) | reg_val); | |
2768 | break; | |
48ac80db HT |
2769 | default: |
2770 | break; | |
2771 | } | |
2772 | ||
2773 | dev_info(&hdev->pdev->dev, "prepare wait ok\n"); | |
2774 | ||
2775 | return ret; | |
2776 | } | |
2777 | ||
1afdb53a HT |
2778 | static bool hclge_reset_err_handle(struct hclge_dev *hdev, bool is_timeout) |
2779 | { | |
2780 | #define MAX_RESET_FAIL_CNT 5 | |
2781 | #define RESET_UPGRADE_DELAY_SEC 10 | |
2782 | ||
2783 | if (hdev->reset_pending) { | |
2784 | dev_info(&hdev->pdev->dev, "Reset pending %lu\n", | |
2785 | hdev->reset_pending); | |
2786 | return true; | |
2787 | } else if ((hdev->reset_type != HNAE3_IMP_RESET) && | |
2788 | (hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG) & | |
2789 | BIT(HCLGE_IMP_RESET_BIT))) { | |
2790 | dev_info(&hdev->pdev->dev, | |
2791 | "reset failed because IMP Reset is pending\n"); | |
2792 | hclge_clear_reset_cause(hdev); | |
2793 | return false; | |
2794 | } else if (hdev->reset_fail_cnt < MAX_RESET_FAIL_CNT) { | |
2795 | hdev->reset_fail_cnt++; | |
2796 | if (is_timeout) { | |
2797 | set_bit(hdev->reset_type, &hdev->reset_pending); | |
2798 | dev_info(&hdev->pdev->dev, | |
2799 | "re-schedule to wait for hw reset done\n"); | |
2800 | return true; | |
2801 | } | |
2802 | ||
2803 | dev_info(&hdev->pdev->dev, "Upgrade reset level\n"); | |
2804 | hclge_clear_reset_cause(hdev); | |
2805 | mod_timer(&hdev->reset_timer, | |
2806 | jiffies + RESET_UPGRADE_DELAY_SEC * HZ); | |
2807 | ||
2808 | return false; | |
2809 | } | |
2810 | ||
2811 | hclge_clear_reset_cause(hdev); | |
2812 | dev_err(&hdev->pdev->dev, "Reset fail!\n"); | |
2813 | return false; | |
2814 | } | |
2815 | ||
7885e906 HT |
2816 | static int hclge_reset_prepare_up(struct hclge_dev *hdev) |
2817 | { | |
2818 | int ret = 0; | |
2819 | ||
2820 | switch (hdev->reset_type) { | |
2821 | case HNAE3_FUNC_RESET: | |
26977990 HT |
2822 | /* fall through */ |
2823 | case HNAE3_FLR_RESET: | |
7885e906 HT |
2824 | ret = hclge_set_all_vf_rst(hdev, false); |
2825 | break; | |
2826 | default: | |
2827 | break; | |
2828 | } | |
2829 | ||
2830 | return ret; | |
2831 | } | |
2832 | ||
d5752031 SM |
2833 | static void hclge_reset(struct hclge_dev *hdev) |
2834 | { | |
7ce98982 | 2835 | struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev); |
1afdb53a HT |
2836 | bool is_timeout = false; |
2837 | int ret; | |
1a45360a | 2838 | |
7ce98982 JS |
2839 | /* Initialize ae_dev reset status as well, in case enet layer wants to |
2840 | * know if device is undergoing reset | |
2841 | */ | |
2842 | ae_dev->reset_type = hdev->reset_type; | |
225c02eb | 2843 | hdev->reset_count++; |
d5752031 | 2844 | /* perform reset of the stack & ae device for a client */ |
1afdb53a HT |
2845 | ret = hclge_notify_roce_client(hdev, HNAE3_DOWN_CLIENT); |
2846 | if (ret) | |
2847 | goto err_reset; | |
2848 | ||
7885e906 HT |
2849 | ret = hclge_reset_prepare_down(hdev); |
2850 | if (ret) | |
2851 | goto err_reset; | |
2852 | ||
47622dc9 | 2853 | rtnl_lock(); |
1afdb53a HT |
2854 | ret = hclge_notify_client(hdev, HNAE3_DOWN_CLIENT); |
2855 | if (ret) | |
2856 | goto err_reset_lock; | |
d5752031 | 2857 | |
1afdb53a | 2858 | rtnl_unlock(); |
48ac80db | 2859 | |
1afdb53a HT |
2860 | ret = hclge_reset_prepare_wait(hdev); |
2861 | if (ret) | |
2862 | goto err_reset; | |
e9a50d09 | 2863 | |
1afdb53a HT |
2864 | if (hclge_reset_wait(hdev)) { |
2865 | is_timeout = true; | |
2866 | goto err_reset; | |
d5752031 SM |
2867 | } |
2868 | ||
1afdb53a HT |
2869 | ret = hclge_notify_roce_client(hdev, HNAE3_UNINIT_CLIENT); |
2870 | if (ret) | |
2871 | goto err_reset; | |
2872 | ||
2873 | rtnl_lock(); | |
2874 | ret = hclge_notify_client(hdev, HNAE3_UNINIT_CLIENT); | |
2875 | if (ret) | |
2876 | goto err_reset_lock; | |
2877 | ||
2878 | ret = hclge_reset_ae_dev(hdev->ae_dev); | |
2879 | if (ret) | |
2880 | goto err_reset_lock; | |
2881 | ||
2882 | ret = hclge_notify_client(hdev, HNAE3_INIT_CLIENT); | |
2883 | if (ret) | |
2884 | goto err_reset_lock; | |
2885 | ||
2886 | hclge_clear_reset_cause(hdev); | |
2887 | ||
7885e906 HT |
2888 | ret = hclge_reset_prepare_up(hdev); |
2889 | if (ret) | |
2890 | goto err_reset_lock; | |
2891 | ||
1afdb53a HT |
2892 | ret = hclge_notify_client(hdev, HNAE3_UP_CLIENT); |
2893 | if (ret) | |
2894 | goto err_reset_lock; | |
2895 | ||
47622dc9 | 2896 | rtnl_unlock(); |
3db6b633 | 2897 | |
1afdb53a HT |
2898 | ret = hclge_notify_roce_client(hdev, HNAE3_INIT_CLIENT); |
2899 | if (ret) | |
2900 | goto err_reset; | |
2901 | ||
2902 | ret = hclge_notify_roce_client(hdev, HNAE3_UP_CLIENT); | |
2903 | if (ret) | |
2904 | goto err_reset; | |
2905 | ||
4f4e5a15 HT |
2906 | hdev->last_reset_time = jiffies; |
2907 | hdev->reset_fail_cnt = 0; | |
2908 | ae_dev->reset_type = HNAE3_NONE_RESET; | |
2909 | ||
1afdb53a HT |
2910 | return; |
2911 | ||
2912 | err_reset_lock: | |
2913 | rtnl_unlock(); | |
2914 | err_reset: | |
2915 | if (hclge_reset_err_handle(hdev, is_timeout)) | |
2916 | hclge_reset_task_schedule(hdev); | |
d5752031 SM |
2917 | } |
2918 | ||
538d8ba0 SJ |
2919 | static void hclge_reset_event(struct pci_dev *pdev, struct hnae3_handle *handle) |
2920 | { | |
2921 | struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev); | |
2922 | struct hclge_dev *hdev = ae_dev->priv; | |
2923 | ||
2924 | /* We might end up getting called broadly because of 2 below cases: | |
2925 | * 1. Recoverable error was conveyed through APEI and only way to bring | |
2926 | * normalcy is to reset. | |
2927 | * 2. A new reset request from the stack due to timeout | |
2928 | * | |
2929 | * For the first case,error event might not have ae handle available. | |
2930 | * check if this is a new reset request and we are not here just because | |
4aef908d SM |
2931 | * last reset attempt did not succeed and watchdog hit us again. We will |
2932 | * know this if last reset request did not occur very recently (watchdog | |
2933 | * timer = 5*HZ, let us check after sufficiently large time, say 4*5*Hz) | |
2934 | * In case of new request we reset the "reset level" to PF reset. | |
1a45360a HT |
2935 | * And if it is a repeat reset request of the most recent one then we |
2936 | * want to make sure we throttle the reset request. Therefore, we will | |
2937 | * not allow it again before 3*HZ times. | |
4aef908d | 2938 | */ |
538d8ba0 SJ |
2939 | if (!handle) |
2940 | handle = &hdev->vport[0].nic; | |
2941 | ||
1a2f7bf2 | 2942 | if (time_before(jiffies, (hdev->last_reset_time + 3 * HZ))) |
1a45360a | 2943 | return; |
2c883d73 | 2944 | else if (hdev->default_reset_request) |
1a2f7bf2 | 2945 | hdev->reset_level = |
2c883d73 HT |
2946 | hclge_get_reset_level(hdev, |
2947 | &hdev->default_reset_request); | |
1a2f7bf2 HT |
2948 | else if (time_after(jiffies, (hdev->last_reset_time + 4 * 5 * HZ))) |
2949 | hdev->reset_level = HNAE3_FUNC_RESET; | |
4ed340ab | 2950 | |
4aef908d | 2951 | dev_info(&hdev->pdev->dev, "received reset event , reset type is %d", |
1a2f7bf2 | 2952 | hdev->reset_level); |
4aef908d SM |
2953 | |
2954 | /* request reset & schedule reset task */ | |
1a2f7bf2 | 2955 | set_bit(hdev->reset_level, &hdev->reset_request); |
4aef908d SM |
2956 | hclge_reset_task_schedule(hdev); |
2957 | ||
1a2f7bf2 HT |
2958 | if (hdev->reset_level < HNAE3_GLOBAL_RESET) |
2959 | hdev->reset_level++; | |
4ed340ab L |
2960 | } |
2961 | ||
2c883d73 HT |
2962 | static void hclge_set_def_reset_request(struct hnae3_ae_dev *ae_dev, |
2963 | enum hnae3_reset_type rst_type) | |
2964 | { | |
2965 | struct hclge_dev *hdev = ae_dev->priv; | |
2966 | ||
2967 | set_bit(rst_type, &hdev->default_reset_request); | |
2968 | } | |
2969 | ||
1afdb53a HT |
2970 | static void hclge_reset_timer(struct timer_list *t) |
2971 | { | |
2972 | struct hclge_dev *hdev = from_timer(hdev, t, reset_timer); | |
2973 | ||
2974 | dev_info(&hdev->pdev->dev, | |
2975 | "triggering global reset in reset timer\n"); | |
2976 | set_bit(HNAE3_GLOBAL_RESET, &hdev->default_reset_request); | |
2977 | hclge_reset_event(hdev->pdev, NULL); | |
2978 | } | |
2979 | ||
4ed340ab L |
2980 | static void hclge_reset_subtask(struct hclge_dev *hdev) |
2981 | { | |
d5752031 SM |
2982 | /* check if there is any ongoing reset in the hardware. This status can |
2983 | * be checked from reset_pending. If there is then, we need to wait for | |
2984 | * hardware to complete reset. | |
2985 | * a. If we are able to figure out in reasonable time that hardware | |
2986 | * has fully resetted then, we can proceed with driver, client | |
2987 | * reset. | |
2988 | * b. else, we can come back later to check this status so re-sched | |
2989 | * now. | |
2990 | */ | |
1a2f7bf2 | 2991 | hdev->last_reset_time = jiffies; |
d5752031 SM |
2992 | hdev->reset_type = hclge_get_reset_level(hdev, &hdev->reset_pending); |
2993 | if (hdev->reset_type != HNAE3_NONE_RESET) | |
2994 | hclge_reset(hdev); | |
4ed340ab | 2995 | |
d5752031 SM |
2996 | /* check if we got any *new* reset requests to be honored */ |
2997 | hdev->reset_type = hclge_get_reset_level(hdev, &hdev->reset_request); | |
2998 | if (hdev->reset_type != HNAE3_NONE_RESET) | |
2999 | hclge_do_reset(hdev); | |
4ed340ab | 3000 | |
4ed340ab L |
3001 | hdev->reset_type = HNAE3_NONE_RESET; |
3002 | } | |
3003 | ||
ed4a1bb8 | 3004 | static void hclge_reset_service_task(struct work_struct *work) |
466b0c00 | 3005 | { |
ed4a1bb8 SM |
3006 | struct hclge_dev *hdev = |
3007 | container_of(work, struct hclge_dev, rst_service_task); | |
3008 | ||
3009 | if (test_and_set_bit(HCLGE_STATE_RST_HANDLING, &hdev->state)) | |
3010 | return; | |
3011 | ||
3012 | clear_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state); | |
3013 | ||
4ed340ab | 3014 | hclge_reset_subtask(hdev); |
ed4a1bb8 SM |
3015 | |
3016 | clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state); | |
466b0c00 L |
3017 | } |
3018 | ||
22fd3468 SM |
3019 | static void hclge_mailbox_service_task(struct work_struct *work) |
3020 | { | |
3021 | struct hclge_dev *hdev = | |
3022 | container_of(work, struct hclge_dev, mbx_service_task); | |
3023 | ||
3024 | if (test_and_set_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state)) | |
3025 | return; | |
3026 | ||
3027 | clear_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state); | |
3028 | ||
3029 | hclge_mbx_handler(hdev); | |
3030 | ||
3031 | clear_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state); | |
3032 | } | |
3033 | ||
337460de YL |
3034 | static void hclge_update_vport_alive(struct hclge_dev *hdev) |
3035 | { | |
3036 | int i; | |
3037 | ||
3038 | /* start from vport 1 for PF is always alive */ | |
3039 | for (i = 1; i < hdev->num_alloc_vport; i++) { | |
3040 | struct hclge_vport *vport = &hdev->vport[i]; | |
3041 | ||
3042 | if (time_after(jiffies, vport->last_active_jiffies + 8 * HZ)) | |
3043 | clear_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state); | |
b2c04029 YL |
3044 | |
3045 | /* If vf is not alive, set to default value */ | |
3046 | if (!test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state)) | |
3047 | vport->mps = HCLGE_MAC_DEFAULT_FRAME; | |
337460de YL |
3048 | } |
3049 | } | |
3050 | ||
46a3df9f S |
3051 | static void hclge_service_task(struct work_struct *work) |
3052 | { | |
3053 | struct hclge_dev *hdev = | |
3054 | container_of(work, struct hclge_dev, service_task); | |
3055 | ||
7a5d2a39 JS |
3056 | if (hdev->hw_stats.stats_timer >= HCLGE_STATS_TIMER_INTERVAL) { |
3057 | hclge_update_stats_for_all(hdev); | |
3058 | hdev->hw_stats.stats_timer = 0; | |
3059 | } | |
3060 | ||
46a3df9f S |
3061 | hclge_update_speed_duplex(hdev); |
3062 | hclge_update_link_status(hdev); | |
337460de | 3063 | hclge_update_vport_alive(hdev); |
46a3df9f S |
3064 | hclge_service_complete(hdev); |
3065 | } | |
3066 | ||
46a3df9f S |
3067 | struct hclge_vport *hclge_get_vport(struct hnae3_handle *handle) |
3068 | { | |
3069 | /* VF handle has no client */ | |
3070 | if (!handle->client) | |
3071 | return container_of(handle, struct hclge_vport, nic); | |
3072 | else if (handle->client->type == HNAE3_CLIENT_ROCE) | |
3073 | return container_of(handle, struct hclge_vport, roce); | |
3074 | else | |
3075 | return container_of(handle, struct hclge_vport, nic); | |
3076 | } | |
3077 | ||
3078 | static int hclge_get_vector(struct hnae3_handle *handle, u16 vector_num, | |
3079 | struct hnae3_vector_info *vector_info) | |
3080 | { | |
3081 | struct hclge_vport *vport = hclge_get_vport(handle); | |
3082 | struct hnae3_vector_info *vector = vector_info; | |
3083 | struct hclge_dev *hdev = vport->back; | |
3084 | int alloc = 0; | |
3085 | int i, j; | |
3086 | ||
3087 | vector_num = min(hdev->num_msi_left, vector_num); | |
3088 | ||
3089 | for (j = 0; j < vector_num; j++) { | |
3090 | for (i = 1; i < hdev->num_msi; i++) { | |
3091 | if (hdev->vector_status[i] == HCLGE_INVALID_VPORT) { | |
3092 | vector->vector = pci_irq_vector(hdev->pdev, i); | |
3093 | vector->io_addr = hdev->hw.io_base + | |
3094 | HCLGE_VECTOR_REG_BASE + | |
3095 | (i - 1) * HCLGE_VECTOR_REG_OFFSET + | |
3096 | vport->vport_id * | |
3097 | HCLGE_VECTOR_VF_OFFSET; | |
3098 | hdev->vector_status[i] = vport->vport_id; | |
887c3820 | 3099 | hdev->vector_irq[i] = vector->vector; |
46a3df9f S |
3100 | |
3101 | vector++; | |
3102 | alloc++; | |
3103 | ||
3104 | break; | |
3105 | } | |
3106 | } | |
3107 | } | |
3108 | hdev->num_msi_left -= alloc; | |
3109 | hdev->num_msi_used += alloc; | |
3110 | ||
3111 | return alloc; | |
3112 | } | |
3113 | ||
3114 | static int hclge_get_vector_index(struct hclge_dev *hdev, int vector) | |
3115 | { | |
3116 | int i; | |
3117 | ||
887c3820 SM |
3118 | for (i = 0; i < hdev->num_msi; i++) |
3119 | if (vector == hdev->vector_irq[i]) | |
3120 | return i; | |
3121 | ||
46a3df9f S |
3122 | return -EINVAL; |
3123 | } | |
3124 | ||
7412200c YL |
3125 | static int hclge_put_vector(struct hnae3_handle *handle, int vector) |
3126 | { | |
3127 | struct hclge_vport *vport = hclge_get_vport(handle); | |
3128 | struct hclge_dev *hdev = vport->back; | |
3129 | int vector_id; | |
3130 | ||
3131 | vector_id = hclge_get_vector_index(hdev, vector); | |
3132 | if (vector_id < 0) { | |
3133 | dev_err(&hdev->pdev->dev, | |
3134 | "Get vector index fail. vector_id =%d\n", vector_id); | |
3135 | return vector_id; | |
3136 | } | |
3137 | ||
3138 | hclge_free_vector(hdev, vector_id); | |
3139 | ||
3140 | return 0; | |
3141 | } | |
3142 | ||
46a3df9f S |
3143 | static u32 hclge_get_rss_key_size(struct hnae3_handle *handle) |
3144 | { | |
3145 | return HCLGE_RSS_KEY_SIZE; | |
3146 | } | |
3147 | ||
3148 | static u32 hclge_get_rss_indir_size(struct hnae3_handle *handle) | |
3149 | { | |
3150 | return HCLGE_RSS_IND_TBL_SIZE; | |
3151 | } | |
3152 | ||
46a3df9f S |
3153 | static int hclge_set_rss_algo_key(struct hclge_dev *hdev, |
3154 | const u8 hfunc, const u8 *key) | |
3155 | { | |
d44f9b63 | 3156 | struct hclge_rss_config_cmd *req; |
46a3df9f S |
3157 | struct hclge_desc desc; |
3158 | int key_offset; | |
3159 | int key_size; | |
3160 | int ret; | |
3161 | ||
d44f9b63 | 3162 | req = (struct hclge_rss_config_cmd *)desc.data; |
46a3df9f S |
3163 | |
3164 | for (key_offset = 0; key_offset < 3; key_offset++) { | |
3165 | hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_GENERIC_CONFIG, | |
3166 | false); | |
3167 | ||
3168 | req->hash_config |= (hfunc & HCLGE_RSS_HASH_ALGO_MASK); | |
3169 | req->hash_config |= (key_offset << HCLGE_RSS_HASH_KEY_OFFSET_B); | |
3170 | ||
3171 | if (key_offset == 2) | |
3172 | key_size = | |
3173 | HCLGE_RSS_KEY_SIZE - HCLGE_RSS_HASH_KEY_NUM * 2; | |
3174 | else | |
3175 | key_size = HCLGE_RSS_HASH_KEY_NUM; | |
3176 | ||
3177 | memcpy(req->hash_key, | |
3178 | key + key_offset * HCLGE_RSS_HASH_KEY_NUM, key_size); | |
3179 | ||
3180 | ret = hclge_cmd_send(&hdev->hw, &desc, 1); | |
3181 | if (ret) { | |
3182 | dev_err(&hdev->pdev->dev, | |
3183 | "Configure RSS config fail, status = %d\n", | |
3184 | ret); | |
3185 | return ret; | |
3186 | } | |
3187 | } | |
3188 | return 0; | |
3189 | } | |
3190 | ||
dcd4ef5e | 3191 | static int hclge_set_rss_indir_table(struct hclge_dev *hdev, const u8 *indir) |
46a3df9f | 3192 | { |
d44f9b63 | 3193 | struct hclge_rss_indirection_table_cmd *req; |
46a3df9f S |
3194 | struct hclge_desc desc; |
3195 | int i, j; | |
3196 | int ret; | |
3197 | ||
d44f9b63 | 3198 | req = (struct hclge_rss_indirection_table_cmd *)desc.data; |
46a3df9f S |
3199 | |
3200 | for (i = 0; i < HCLGE_RSS_CFG_TBL_NUM; i++) { | |
3201 | hclge_cmd_setup_basic_desc | |
3202 | (&desc, HCLGE_OPC_RSS_INDIR_TABLE, false); | |
3203 | ||
a90bb9a5 YL |
3204 | req->start_table_index = |
3205 | cpu_to_le16(i * HCLGE_RSS_CFG_TBL_SIZE); | |
3206 | req->rss_set_bitmap = cpu_to_le16(HCLGE_RSS_SET_BITMAP_MSK); | |
46a3df9f S |
3207 | |
3208 | for (j = 0; j < HCLGE_RSS_CFG_TBL_SIZE; j++) | |
3209 | req->rss_result[j] = | |
3210 | indir[i * HCLGE_RSS_CFG_TBL_SIZE + j]; | |
3211 | ||
3212 | ret = hclge_cmd_send(&hdev->hw, &desc, 1); | |
3213 | if (ret) { | |
3214 | dev_err(&hdev->pdev->dev, | |
3215 | "Configure rss indir table fail,status = %d\n", | |
3216 | ret); | |
3217 | return ret; | |
3218 | } | |
3219 | } | |
3220 | return 0; | |
3221 | } | |
3222 | ||
3223 | static int hclge_set_rss_tc_mode(struct hclge_dev *hdev, u16 *tc_valid, | |
3224 | u16 *tc_size, u16 *tc_offset) | |
3225 | { | |
d44f9b63 | 3226 | struct hclge_rss_tc_mode_cmd *req; |
46a3df9f S |
3227 | struct hclge_desc desc; |
3228 | int ret; | |
3229 | int i; | |
3230 | ||
3231 | hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_TC_MODE, false); | |
d44f9b63 | 3232 | req = (struct hclge_rss_tc_mode_cmd *)desc.data; |
46a3df9f S |
3233 | |
3234 | for (i = 0; i < HCLGE_MAX_TC_NUM; i++) { | |
a90bb9a5 YL |
3235 | u16 mode = 0; |
3236 | ||
ccc23ef3 PL |
3237 | hnae3_set_bit(mode, HCLGE_RSS_TC_VALID_B, (tc_valid[i] & 0x1)); |
3238 | hnae3_set_field(mode, HCLGE_RSS_TC_SIZE_M, | |
3239 | HCLGE_RSS_TC_SIZE_S, tc_size[i]); | |
3240 | hnae3_set_field(mode, HCLGE_RSS_TC_OFFSET_M, | |
3241 | HCLGE_RSS_TC_OFFSET_S, tc_offset[i]); | |
a90bb9a5 YL |
3242 | |
3243 | req->rss_tc_mode[i] = cpu_to_le16(mode); | |
46a3df9f S |
3244 | } |
3245 | ||
3246 | ret = hclge_cmd_send(&hdev->hw, &desc, 1); | |
90415e85 | 3247 | if (ret) |
46a3df9f S |
3248 | dev_err(&hdev->pdev->dev, |
3249 | "Configure rss tc mode fail, status = %d\n", ret); | |
46a3df9f | 3250 | |
90415e85 | 3251 | return ret; |
46a3df9f S |
3252 | } |
3253 | ||
8e4c877d PL |
3254 | static void hclge_get_rss_type(struct hclge_vport *vport) |
3255 | { | |
3256 | if (vport->rss_tuple_sets.ipv4_tcp_en || | |
3257 | vport->rss_tuple_sets.ipv4_udp_en || | |
3258 | vport->rss_tuple_sets.ipv4_sctp_en || | |
3259 | vport->rss_tuple_sets.ipv6_tcp_en || | |
3260 | vport->rss_tuple_sets.ipv6_udp_en || | |
3261 | vport->rss_tuple_sets.ipv6_sctp_en) | |
3262 | vport->nic.kinfo.rss_type = PKT_HASH_TYPE_L4; | |
3263 | else if (vport->rss_tuple_sets.ipv4_fragment_en || | |
3264 | vport->rss_tuple_sets.ipv6_fragment_en) | |
3265 | vport->nic.kinfo.rss_type = PKT_HASH_TYPE_L3; | |
3266 | else | |
3267 | vport->nic.kinfo.rss_type = PKT_HASH_TYPE_NONE; | |
3268 | } | |
3269 | ||
46a3df9f S |
3270 | static int hclge_set_rss_input_tuple(struct hclge_dev *hdev) |
3271 | { | |
d44f9b63 | 3272 | struct hclge_rss_input_tuple_cmd *req; |
46a3df9f S |
3273 | struct hclge_desc desc; |
3274 | int ret; | |
3275 | ||
3276 | hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_INPUT_TUPLE, false); | |
3277 | ||
d44f9b63 | 3278 | req = (struct hclge_rss_input_tuple_cmd *)desc.data; |
637053ef YL |
3279 | |
3280 | /* Get the tuple cfg from pf */ | |
3281 | req->ipv4_tcp_en = hdev->vport[0].rss_tuple_sets.ipv4_tcp_en; | |
3282 | req->ipv4_udp_en = hdev->vport[0].rss_tuple_sets.ipv4_udp_en; | |
3283 | req->ipv4_sctp_en = hdev->vport[0].rss_tuple_sets.ipv4_sctp_en; | |
3284 | req->ipv4_fragment_en = hdev->vport[0].rss_tuple_sets.ipv4_fragment_en; | |
3285 | req->ipv6_tcp_en = hdev->vport[0].rss_tuple_sets.ipv6_tcp_en; | |
3286 | req->ipv6_udp_en = hdev->vport[0].rss_tuple_sets.ipv6_udp_en; | |
3287 | req->ipv6_sctp_en = hdev->vport[0].rss_tuple_sets.ipv6_sctp_en; | |
3288 | req->ipv6_fragment_en = hdev->vport[0].rss_tuple_sets.ipv6_fragment_en; | |
8e4c877d | 3289 | hclge_get_rss_type(&hdev->vport[0]); |
46a3df9f | 3290 | ret = hclge_cmd_send(&hdev->hw, &desc, 1); |
90415e85 | 3291 | if (ret) |
46a3df9f S |
3292 | dev_err(&hdev->pdev->dev, |
3293 | "Configure rss input fail, status = %d\n", ret); | |
90415e85 | 3294 | return ret; |
46a3df9f S |
3295 | } |
3296 | ||
3297 | static int hclge_get_rss(struct hnae3_handle *handle, u32 *indir, | |
3298 | u8 *key, u8 *hfunc) | |
3299 | { | |
3300 | struct hclge_vport *vport = hclge_get_vport(handle); | |
46a3df9f S |
3301 | int i; |
3302 | ||
3303 | /* Get hash algorithm */ | |
6868d695 JS |
3304 | if (hfunc) { |
3305 | switch (vport->rss_algo) { | |
3306 | case HCLGE_RSS_HASH_ALGO_TOEPLITZ: | |
3307 | *hfunc = ETH_RSS_HASH_TOP; | |
3308 | break; | |
3309 | case HCLGE_RSS_HASH_ALGO_SIMPLE: | |
3310 | *hfunc = ETH_RSS_HASH_XOR; | |
3311 | break; | |
3312 | default: | |
3313 | *hfunc = ETH_RSS_HASH_UNKNOWN; | |
3314 | break; | |
3315 | } | |
3316 | } | |
46a3df9f S |
3317 | |
3318 | /* Get the RSS Key required by the user */ | |
3319 | if (key) | |
3320 | memcpy(key, vport->rss_hash_key, HCLGE_RSS_KEY_SIZE); | |
3321 | ||
3322 | /* Get indirect table */ | |
3323 | if (indir) | |
3324 | for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++) | |
3325 | indir[i] = vport->rss_indirection_tbl[i]; | |
3326 | ||
3327 | return 0; | |
3328 | } | |
3329 | ||
3330 | static int hclge_set_rss(struct hnae3_handle *handle, const u32 *indir, | |
3331 | const u8 *key, const u8 hfunc) | |
3332 | { | |
3333 | struct hclge_vport *vport = hclge_get_vport(handle); | |
3334 | struct hclge_dev *hdev = vport->back; | |
3335 | u8 hash_algo; | |
3336 | int ret, i; | |
3337 | ||
3338 | /* Set the RSS Hash Key if specififed by the user */ | |
3339 | if (key) { | |
6868d695 JS |
3340 | switch (hfunc) { |
3341 | case ETH_RSS_HASH_TOP: | |
46a3df9f | 3342 | hash_algo = HCLGE_RSS_HASH_ALGO_TOEPLITZ; |
6868d695 JS |
3343 | break; |
3344 | case ETH_RSS_HASH_XOR: | |
3345 | hash_algo = HCLGE_RSS_HASH_ALGO_SIMPLE; | |
3346 | break; | |
3347 | case ETH_RSS_HASH_NO_CHANGE: | |
3348 | hash_algo = vport->rss_algo; | |
3349 | break; | |
3350 | default: | |
46a3df9f | 3351 | return -EINVAL; |
6868d695 JS |
3352 | } |
3353 | ||
46a3df9f S |
3354 | ret = hclge_set_rss_algo_key(hdev, hash_algo, key); |
3355 | if (ret) | |
3356 | return ret; | |
dcd4ef5e YL |
3357 | |
3358 | /* Update the shadow RSS key with user specified qids */ | |
3359 | memcpy(vport->rss_hash_key, key, HCLGE_RSS_KEY_SIZE); | |
3360 | vport->rss_algo = hash_algo; | |
46a3df9f S |
3361 | } |
3362 | ||
3363 | /* Update the shadow RSS table with user specified qids */ | |
3364 | for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++) | |
3365 | vport->rss_indirection_tbl[i] = indir[i]; | |
3366 | ||
3367 | /* Update the hardware */ | |
dcd4ef5e | 3368 | return hclge_set_rss_indir_table(hdev, vport->rss_indirection_tbl); |
46a3df9f S |
3369 | } |
3370 | ||
f7db940a L |
3371 | static u8 hclge_get_rss_hash_bits(struct ethtool_rxnfc *nfc) |
3372 | { | |
3373 | u8 hash_sets = nfc->data & RXH_L4_B_0_1 ? HCLGE_S_PORT_BIT : 0; | |
3374 | ||
3375 | if (nfc->data & RXH_L4_B_2_3) | |
3376 | hash_sets |= HCLGE_D_PORT_BIT; | |
3377 | else | |
3378 | hash_sets &= ~HCLGE_D_PORT_BIT; | |
3379 | ||
3380 | if (nfc->data & RXH_IP_SRC) | |
3381 | hash_sets |= HCLGE_S_IP_BIT; | |
3382 | else | |
3383 | hash_sets &= ~HCLGE_S_IP_BIT; | |
3384 | ||
3385 | if (nfc->data & RXH_IP_DST) | |
3386 | hash_sets |= HCLGE_D_IP_BIT; | |
3387 | else | |
3388 | hash_sets &= ~HCLGE_D_IP_BIT; | |
3389 | ||
3390 | if (nfc->flow_type == SCTP_V4_FLOW || nfc->flow_type == SCTP_V6_FLOW) | |
3391 | hash_sets |= HCLGE_V_TAG_BIT; | |
3392 | ||
3393 | return hash_sets; | |
3394 | } | |
3395 | ||
3396 | static int hclge_set_rss_tuple(struct hnae3_handle *handle, | |
3397 | struct ethtool_rxnfc *nfc) | |
3398 | { | |
3399 | struct hclge_vport *vport = hclge_get_vport(handle); | |
3400 | struct hclge_dev *hdev = vport->back; | |
3401 | struct hclge_rss_input_tuple_cmd *req; | |
3402 | struct hclge_desc desc; | |
3403 | u8 tuple_sets; | |
3404 | int ret; | |
3405 | ||
3406 | if (nfc->data & ~(RXH_IP_SRC | RXH_IP_DST | | |
3407 | RXH_L4_B_0_1 | RXH_L4_B_2_3)) | |
3408 | return -EINVAL; | |
3409 | ||
3410 | req = (struct hclge_rss_input_tuple_cmd *)desc.data; | |
637053ef | 3411 | hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_INPUT_TUPLE, false); |
f7db940a | 3412 | |
637053ef YL |
3413 | req->ipv4_tcp_en = vport->rss_tuple_sets.ipv4_tcp_en; |
3414 | req->ipv4_udp_en = vport->rss_tuple_sets.ipv4_udp_en; | |
3415 | req->ipv4_sctp_en = vport->rss_tuple_sets.ipv4_sctp_en; | |
3416 | req->ipv4_fragment_en = vport->rss_tuple_sets.ipv4_fragment_en; | |
3417 | req->ipv6_tcp_en = vport->rss_tuple_sets.ipv6_tcp_en; | |
3418 | req->ipv6_udp_en = vport->rss_tuple_sets.ipv6_udp_en; | |
3419 | req->ipv6_sctp_en = vport->rss_tuple_sets.ipv6_sctp_en; | |
3420 | req->ipv6_fragment_en = vport->rss_tuple_sets.ipv6_fragment_en; | |
f7db940a L |
3421 | |
3422 | tuple_sets = hclge_get_rss_hash_bits(nfc); | |
3423 | switch (nfc->flow_type) { | |
3424 | case TCP_V4_FLOW: | |
3425 | req->ipv4_tcp_en = tuple_sets; | |
3426 | break; | |
3427 | case TCP_V6_FLOW: | |
3428 | req->ipv6_tcp_en = tuple_sets; | |
3429 | break; | |
3430 | case UDP_V4_FLOW: | |
3431 | req->ipv4_udp_en = tuple_sets; | |
3432 | break; | |
3433 | case UDP_V6_FLOW: | |
3434 | req->ipv6_udp_en = tuple_sets; | |
3435 | break; | |
3436 | case SCTP_V4_FLOW: | |
3437 | req->ipv4_sctp_en = tuple_sets; | |
3438 | break; | |
3439 | case SCTP_V6_FLOW: | |
3440 | if ((nfc->data & RXH_L4_B_0_1) || | |
3441 | (nfc->data & RXH_L4_B_2_3)) | |
3442 | return -EINVAL; | |
3443 | ||
3444 | req->ipv6_sctp_en = tuple_sets; | |
3445 | break; | |
3446 | case IPV4_FLOW: | |
3447 | req->ipv4_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER; | |
3448 | break; | |
3449 | case IPV6_FLOW: | |
3450 | req->ipv6_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER; | |
3451 | break; | |
3452 | default: | |
3453 | return -EINVAL; | |
3454 | } | |
3455 | ||
3456 | ret = hclge_cmd_send(&hdev->hw, &desc, 1); | |
637053ef | 3457 | if (ret) { |
f7db940a L |
3458 | dev_err(&hdev->pdev->dev, |
3459 | "Set rss tuple fail, status = %d\n", ret); | |
637053ef YL |
3460 | return ret; |
3461 | } | |
f7db940a | 3462 | |
637053ef YL |
3463 | vport->rss_tuple_sets.ipv4_tcp_en = req->ipv4_tcp_en; |
3464 | vport->rss_tuple_sets.ipv4_udp_en = req->ipv4_udp_en; | |
3465 | vport->rss_tuple_sets.ipv4_sctp_en = req->ipv4_sctp_en; | |
3466 | vport->rss_tuple_sets.ipv4_fragment_en = req->ipv4_fragment_en; | |
3467 | vport->rss_tuple_sets.ipv6_tcp_en = req->ipv6_tcp_en; | |
3468 | vport->rss_tuple_sets.ipv6_udp_en = req->ipv6_udp_en; | |
3469 | vport->rss_tuple_sets.ipv6_sctp_en = req->ipv6_sctp_en; | |
3470 | vport->rss_tuple_sets.ipv6_fragment_en = req->ipv6_fragment_en; | |
8e4c877d | 3471 | hclge_get_rss_type(vport); |
637053ef | 3472 | return 0; |
f7db940a L |
3473 | } |
3474 | ||
07d29954 L |
3475 | static int hclge_get_rss_tuple(struct hnae3_handle *handle, |
3476 | struct ethtool_rxnfc *nfc) | |
3477 | { | |
3478 | struct hclge_vport *vport = hclge_get_vport(handle); | |
07d29954 | 3479 | u8 tuple_sets; |
07d29954 L |
3480 | |
3481 | nfc->data = 0; | |
3482 | ||
07d29954 L |
3483 | switch (nfc->flow_type) { |
3484 | case TCP_V4_FLOW: | |
637053ef | 3485 | tuple_sets = vport->rss_tuple_sets.ipv4_tcp_en; |
07d29954 L |
3486 | break; |
3487 | case UDP_V4_FLOW: | |
637053ef | 3488 | tuple_sets = vport->rss_tuple_sets.ipv4_udp_en; |
07d29954 L |
3489 | break; |
3490 | case TCP_V6_FLOW: | |
637053ef | 3491 | tuple_sets = vport->rss_tuple_sets.ipv6_tcp_en; |
07d29954 L |
3492 | break; |
3493 | case UDP_V6_FLOW: | |
637053ef | 3494 | tuple_sets = vport->rss_tuple_sets.ipv6_udp_en; |
07d29954 L |
3495 | break; |
3496 | case SCTP_V4_FLOW: | |
637053ef | 3497 | tuple_sets = vport->rss_tuple_sets.ipv4_sctp_en; |
07d29954 L |
3498 | break; |
3499 | case SCTP_V6_FLOW: | |
637053ef | 3500 | tuple_sets = vport->rss_tuple_sets.ipv6_sctp_en; |
07d29954 L |
3501 | break; |
3502 | case IPV4_FLOW: | |
3503 | case IPV6_FLOW: | |
3504 | tuple_sets = HCLGE_S_IP_BIT | HCLGE_D_IP_BIT; | |
3505 | break; | |
3506 | default: | |
3507 | return -EINVAL; | |
3508 | } | |
3509 | ||
3510 | if (!tuple_sets) | |
3511 | return 0; | |
3512 | ||
3513 | if (tuple_sets & HCLGE_D_PORT_BIT) | |
3514 | nfc->data |= RXH_L4_B_2_3; | |
3515 | if (tuple_sets & HCLGE_S_PORT_BIT) | |
3516 | nfc->data |= RXH_L4_B_0_1; | |
3517 | if (tuple_sets & HCLGE_D_IP_BIT) | |
3518 | nfc->data |= RXH_IP_DST; | |
3519 | if (tuple_sets & HCLGE_S_IP_BIT) | |
3520 | nfc->data |= RXH_IP_SRC; | |
3521 | ||
3522 | return 0; | |
3523 | } | |
3524 | ||
46a3df9f S |
3525 | static int hclge_get_tc_size(struct hnae3_handle *handle) |
3526 | { | |
3527 | struct hclge_vport *vport = hclge_get_vport(handle); | |
3528 | struct hclge_dev *hdev = vport->back; | |
3529 | ||
3530 | return hdev->rss_size_max; | |
3531 | } | |
3532 | ||
77f255c1 | 3533 | int hclge_rss_init_hw(struct hclge_dev *hdev) |
46a3df9f | 3534 | { |
46a3df9f | 3535 | struct hclge_vport *vport = hdev->vport; |
8015bb74 YL |
3536 | u8 *rss_indir = vport[0].rss_indirection_tbl; |
3537 | u16 rss_size = vport[0].alloc_rss_size; | |
3538 | u8 *key = vport[0].rss_hash_key; | |
3539 | u8 hfunc = vport[0].rss_algo; | |
46a3df9f | 3540 | u16 tc_offset[HCLGE_MAX_TC_NUM]; |
46a3df9f S |
3541 | u16 tc_valid[HCLGE_MAX_TC_NUM]; |
3542 | u16 tc_size[HCLGE_MAX_TC_NUM]; | |
8015bb74 YL |
3543 | u16 roundup_size; |
3544 | int i, ret; | |
68ece54e | 3545 | |
46a3df9f S |
3546 | ret = hclge_set_rss_indir_table(hdev, rss_indir); |
3547 | if (ret) | |
8015bb74 | 3548 | return ret; |
46a3df9f | 3549 | |
46a3df9f S |
3550 | ret = hclge_set_rss_algo_key(hdev, hfunc, key); |
3551 | if (ret) | |
8015bb74 | 3552 | return ret; |
46a3df9f S |
3553 | |
3554 | ret = hclge_set_rss_input_tuple(hdev); | |
3555 | if (ret) | |
8015bb74 | 3556 | return ret; |
46a3df9f | 3557 | |
68ece54e YL |
3558 | /* Each TC have the same queue size, and tc_size set to hardware is |
3559 | * the log2 of roundup power of two of rss_size, the acutal queue | |
3560 | * size is limited by indirection table. | |
3561 | */ | |
3562 | if (rss_size > HCLGE_RSS_TC_SIZE_7 || rss_size == 0) { | |
3563 | dev_err(&hdev->pdev->dev, | |
3564 | "Configure rss tc size failed, invalid TC_SIZE = %d\n", | |
3565 | rss_size); | |
8015bb74 | 3566 | return -EINVAL; |
68ece54e YL |
3567 | } |
3568 | ||
3569 | roundup_size = roundup_pow_of_two(rss_size); | |
3570 | roundup_size = ilog2(roundup_size); | |
3571 | ||
46a3df9f | 3572 | for (i = 0; i < HCLGE_MAX_TC_NUM; i++) { |
68ece54e | 3573 | tc_valid[i] = 0; |
46a3df9f | 3574 | |
68ece54e YL |
3575 | if (!(hdev->hw_tc_map & BIT(i))) |
3576 | continue; | |
3577 | ||
3578 | tc_valid[i] = 1; | |
3579 | tc_size[i] = roundup_size; | |
3580 | tc_offset[i] = rss_size * i; | |
46a3df9f | 3581 | } |
68ece54e | 3582 | |
8015bb74 YL |
3583 | return hclge_set_rss_tc_mode(hdev, tc_valid, tc_size, tc_offset); |
3584 | } | |
46a3df9f | 3585 | |
8015bb74 YL |
3586 | void hclge_rss_indir_init_cfg(struct hclge_dev *hdev) |
3587 | { | |
3588 | struct hclge_vport *vport = hdev->vport; | |
3589 | int i, j; | |
46a3df9f | 3590 | |
8015bb74 YL |
3591 | for (j = 0; j < hdev->num_vmdq_vport + 1; j++) { |
3592 | for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++) | |
3593 | vport[j].rss_indirection_tbl[i] = | |
3594 | i % vport[j].alloc_rss_size; | |
3595 | } | |
3596 | } | |
3597 | ||
3598 | static void hclge_rss_init_cfg(struct hclge_dev *hdev) | |
3599 | { | |
3600 | struct hclge_vport *vport = hdev->vport; | |
3601 | int i; | |
3602 | ||
8015bb74 YL |
3603 | for (i = 0; i < hdev->num_vmdq_vport + 1; i++) { |
3604 | vport[i].rss_tuple_sets.ipv4_tcp_en = | |
3605 | HCLGE_RSS_INPUT_TUPLE_OTHER; | |
3606 | vport[i].rss_tuple_sets.ipv4_udp_en = | |
3607 | HCLGE_RSS_INPUT_TUPLE_OTHER; | |
3608 | vport[i].rss_tuple_sets.ipv4_sctp_en = | |
3609 | HCLGE_RSS_INPUT_TUPLE_SCTP; | |
3610 | vport[i].rss_tuple_sets.ipv4_fragment_en = | |
3611 | HCLGE_RSS_INPUT_TUPLE_OTHER; | |
3612 | vport[i].rss_tuple_sets.ipv6_tcp_en = | |
3613 | HCLGE_RSS_INPUT_TUPLE_OTHER; | |
3614 | vport[i].rss_tuple_sets.ipv6_udp_en = | |
3615 | HCLGE_RSS_INPUT_TUPLE_OTHER; | |
3616 | vport[i].rss_tuple_sets.ipv6_sctp_en = | |
3617 | HCLGE_RSS_INPUT_TUPLE_SCTP; | |
3618 | vport[i].rss_tuple_sets.ipv6_fragment_en = | |
3619 | HCLGE_RSS_INPUT_TUPLE_OTHER; | |
3620 | ||
3621 | vport[i].rss_algo = HCLGE_RSS_HASH_ALGO_TOEPLITZ; | |
823fe868 FL |
3622 | |
3623 | netdev_rss_key_fill(vport[i].rss_hash_key, HCLGE_RSS_KEY_SIZE); | |
8015bb74 YL |
3624 | } |
3625 | ||
3626 | hclge_rss_indir_init_cfg(hdev); | |
46a3df9f S |
3627 | } |
3628 | ||
63d7e66f SM |
3629 | int hclge_bind_ring_with_vector(struct hclge_vport *vport, |
3630 | int vector_id, bool en, | |
3631 | struct hnae3_ring_chain_node *ring_chain) | |
46a3df9f S |
3632 | { |
3633 | struct hclge_dev *hdev = vport->back; | |
46a3df9f S |
3634 | struct hnae3_ring_chain_node *node; |
3635 | struct hclge_desc desc; | |
63d7e66f SM |
3636 | struct hclge_ctrl_vector_chain_cmd *req |
3637 | = (struct hclge_ctrl_vector_chain_cmd *)desc.data; | |
3638 | enum hclge_cmd_status status; | |
3639 | enum hclge_opcode_type op; | |
3640 | u16 tqp_type_and_id; | |
46a3df9f S |
3641 | int i; |
3642 | ||
63d7e66f SM |
3643 | op = en ? HCLGE_OPC_ADD_RING_TO_VECTOR : HCLGE_OPC_DEL_RING_TO_VECTOR; |
3644 | hclge_cmd_setup_basic_desc(&desc, op, false); | |
46a3df9f S |
3645 | req->int_vector_id = vector_id; |
3646 | ||
3647 | i = 0; | |
3648 | for (node = ring_chain; node; node = node->next) { | |
63d7e66f | 3649 | tqp_type_and_id = le16_to_cpu(req->tqp_type_and_id[i]); |
ccc23ef3 PL |
3650 | hnae3_set_field(tqp_type_and_id, HCLGE_INT_TYPE_M, |
3651 | HCLGE_INT_TYPE_S, | |
3652 | hnae3_get_bit(node->flag, HNAE3_RING_TYPE_B)); | |
3653 | hnae3_set_field(tqp_type_and_id, HCLGE_TQP_ID_M, | |
3654 | HCLGE_TQP_ID_S, node->tqp_index); | |
3655 | hnae3_set_field(tqp_type_and_id, HCLGE_INT_GL_IDX_M, | |
3656 | HCLGE_INT_GL_IDX_S, | |
3657 | hnae3_get_field(node->int_gl_idx, | |
3658 | HNAE3_RING_GL_IDX_M, | |
3659 | HNAE3_RING_GL_IDX_S)); | |
63d7e66f | 3660 | req->tqp_type_and_id[i] = cpu_to_le16(tqp_type_and_id); |
46a3df9f S |
3661 | if (++i >= HCLGE_VECTOR_ELEMENTS_PER_CMD) { |
3662 | req->int_cause_num = HCLGE_VECTOR_ELEMENTS_PER_CMD; | |
63d7e66f | 3663 | req->vfid = vport->vport_id; |
46a3df9f | 3664 | |
63d7e66f SM |
3665 | status = hclge_cmd_send(&hdev->hw, &desc, 1); |
3666 | if (status) { | |
46a3df9f S |
3667 | dev_err(&hdev->pdev->dev, |
3668 | "Map TQP fail, status is %d.\n", | |
63d7e66f SM |
3669 | status); |
3670 | return -EIO; | |
46a3df9f S |
3671 | } |
3672 | i = 0; | |
3673 | ||
3674 | hclge_cmd_setup_basic_desc(&desc, | |
63d7e66f | 3675 | op, |
46a3df9f S |
3676 | false); |
3677 | req->int_vector_id = vector_id; | |
3678 | } | |
3679 | } | |
3680 | ||
3681 | if (i > 0) { | |
3682 | req->int_cause_num = i; | |
63d7e66f SM |
3683 | req->vfid = vport->vport_id; |
3684 | status = hclge_cmd_send(&hdev->hw, &desc, 1); | |
3685 | if (status) { | |
46a3df9f | 3686 | dev_err(&hdev->pdev->dev, |
63d7e66f SM |
3687 | "Map TQP fail, status is %d.\n", status); |
3688 | return -EIO; | |
46a3df9f S |
3689 | } |
3690 | } | |
3691 | ||
3692 | return 0; | |
3693 | } | |
3694 | ||
63d7e66f SM |
3695 | static int hclge_map_ring_to_vector(struct hnae3_handle *handle, |
3696 | int vector, | |
3697 | struct hnae3_ring_chain_node *ring_chain) | |
46a3df9f S |
3698 | { |
3699 | struct hclge_vport *vport = hclge_get_vport(handle); | |
3700 | struct hclge_dev *hdev = vport->back; | |
3701 | int vector_id; | |
3702 | ||
3703 | vector_id = hclge_get_vector_index(hdev, vector); | |
3704 | if (vector_id < 0) { | |
3705 | dev_err(&hdev->pdev->dev, | |
63d7e66f | 3706 | "Get vector index fail. vector_id =%d\n", vector_id); |
46a3df9f S |
3707 | return vector_id; |
3708 | } | |
3709 | ||
63d7e66f | 3710 | return hclge_bind_ring_with_vector(vport, vector_id, true, ring_chain); |
46a3df9f S |
3711 | } |
3712 | ||
63d7e66f SM |
3713 | static int hclge_unmap_ring_frm_vector(struct hnae3_handle *handle, |
3714 | int vector, | |
3715 | struct hnae3_ring_chain_node *ring_chain) | |
46a3df9f S |
3716 | { |
3717 | struct hclge_vport *vport = hclge_get_vport(handle); | |
3718 | struct hclge_dev *hdev = vport->back; | |
63d7e66f | 3719 | int vector_id, ret; |
46a3df9f | 3720 | |
f9637cc2 PL |
3721 | if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state)) |
3722 | return 0; | |
3723 | ||
46a3df9f S |
3724 | vector_id = hclge_get_vector_index(hdev, vector); |
3725 | if (vector_id < 0) { | |
3726 | dev_err(&handle->pdev->dev, | |
3727 | "Get vector index fail. ret =%d\n", vector_id); | |
3728 | return vector_id; | |
3729 | } | |
3730 | ||
63d7e66f | 3731 | ret = hclge_bind_ring_with_vector(vport, vector_id, false, ring_chain); |
7412200c | 3732 | if (ret) |
63d7e66f SM |
3733 | dev_err(&handle->pdev->dev, |
3734 | "Unmap ring from vector fail. vectorid=%d, ret =%d\n", | |
3735 | vector_id, | |
3736 | ret); | |
46a3df9f | 3737 | |
7412200c | 3738 | return ret; |
46a3df9f S |
3739 | } |
3740 | ||
3741 | int hclge_cmd_set_promisc_mode(struct hclge_dev *hdev, | |
3742 | struct hclge_promisc_param *param) | |
3743 | { | |
d44f9b63 | 3744 | struct hclge_promisc_cfg_cmd *req; |
46a3df9f S |
3745 | struct hclge_desc desc; |
3746 | int ret; | |
3747 | ||
3748 | hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_PROMISC_MODE, false); | |
3749 | ||
d44f9b63 | 3750 | req = (struct hclge_promisc_cfg_cmd *)desc.data; |
46a3df9f | 3751 | req->vf_id = param->vf_id; |
4771e104 PL |
3752 | |
3753 | /* HCLGE_PROMISC_TX_EN_B and HCLGE_PROMISC_RX_EN_B are not supported on | |
3754 | * pdev revision(0x20), new revision support them. The | |
3755 | * value of this two fields will not return error when driver | |
3756 | * send command to fireware in revision(0x20). | |
3757 | */ | |
3758 | req->flag = (param->enable << HCLGE_PROMISC_EN_B) | | |
3759 | HCLGE_PROMISC_TX_EN_B | HCLGE_PROMISC_RX_EN_B; | |
46a3df9f S |
3760 | |
3761 | ret = hclge_cmd_send(&hdev->hw, &desc, 1); | |
90415e85 | 3762 | if (ret) |
46a3df9f S |
3763 | dev_err(&hdev->pdev->dev, |
3764 | "Set promisc mode fail, status is %d.\n", ret); | |
90415e85 JS |
3765 | |
3766 | return ret; | |
46a3df9f S |
3767 | } |
3768 | ||
3769 | void hclge_promisc_param_init(struct hclge_promisc_param *param, bool en_uc, | |
3770 | bool en_mc, bool en_bc, int vport_id) | |
3771 | { | |
3772 | if (!param) | |
3773 | return; | |
3774 | ||
3775 | memset(param, 0, sizeof(struct hclge_promisc_param)); | |
3776 | if (en_uc) | |
3777 | param->enable = HCLGE_PROMISC_EN_UC; | |
3778 | if (en_mc) | |
3779 | param->enable |= HCLGE_PROMISC_EN_MC; | |
3780 | if (en_bc) | |
3781 | param->enable |= HCLGE_PROMISC_EN_BC; | |
3782 | param->vf_id = vport_id; | |
3783 | } | |
3784 | ||
abe62a63 HT |
3785 | static int hclge_set_promisc_mode(struct hnae3_handle *handle, bool en_uc_pmc, |
3786 | bool en_mc_pmc) | |
46a3df9f S |
3787 | { |
3788 | struct hclge_vport *vport = hclge_get_vport(handle); | |
3789 | struct hclge_dev *hdev = vport->back; | |
3790 | struct hclge_promisc_param param; | |
3791 | ||
e8600a3d PL |
3792 | hclge_promisc_param_init(¶m, en_uc_pmc, en_mc_pmc, true, |
3793 | vport->vport_id); | |
abe62a63 | 3794 | return hclge_cmd_set_promisc_mode(hdev, ¶m); |
46a3df9f S |
3795 | } |
3796 | ||
10a954bc JS |
3797 | static int hclge_get_fd_mode(struct hclge_dev *hdev, u8 *fd_mode) |
3798 | { | |
3799 | struct hclge_get_fd_mode_cmd *req; | |
3800 | struct hclge_desc desc; | |
3801 | int ret; | |
3802 | ||
3803 | hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_MODE_CTRL, true); | |
3804 | ||
3805 | req = (struct hclge_get_fd_mode_cmd *)desc.data; | |
3806 | ||
3807 | ret = hclge_cmd_send(&hdev->hw, &desc, 1); | |
3808 | if (ret) { | |
3809 | dev_err(&hdev->pdev->dev, "get fd mode fail, ret=%d\n", ret); | |
3810 | return ret; | |
3811 | } | |
3812 | ||
3813 | *fd_mode = req->mode; | |
3814 | ||
3815 | return ret; | |
3816 | } | |
3817 | ||
3818 | static int hclge_get_fd_allocation(struct hclge_dev *hdev, | |
3819 | u32 *stage1_entry_num, | |
3820 | u32 *stage2_entry_num, | |
3821 | u16 *stage1_counter_num, | |
3822 | u16 *stage2_counter_num) | |
3823 | { | |
3824 | struct hclge_get_fd_allocation_cmd *req; | |
3825 | struct hclge_desc desc; | |
3826 | int ret; | |
3827 | ||
3828 | hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_GET_ALLOCATION, true); | |
3829 | ||
3830 | req = (struct hclge_get_fd_allocation_cmd *)desc.data; | |
3831 | ||
3832 | ret = hclge_cmd_send(&hdev->hw, &desc, 1); | |
3833 | if (ret) { | |
3834 | dev_err(&hdev->pdev->dev, "query fd allocation fail, ret=%d\n", | |
3835 | ret); | |
3836 | return ret; | |
3837 | } | |
3838 | ||
3839 | *stage1_entry_num = le32_to_cpu(req->stage1_entry_num); | |
3840 | *stage2_entry_num = le32_to_cpu(req->stage2_entry_num); | |
3841 | *stage1_counter_num = le16_to_cpu(req->stage1_counter_num); | |
3842 | *stage2_counter_num = le16_to_cpu(req->stage2_counter_num); | |
3843 | ||
3844 | return ret; | |
3845 | } | |
3846 | ||
3847 | static int hclge_set_fd_key_config(struct hclge_dev *hdev, int stage_num) | |
3848 | { | |
3849 | struct hclge_set_fd_key_config_cmd *req; | |
3850 | struct hclge_fd_key_cfg *stage; | |
3851 | struct hclge_desc desc; | |
3852 | int ret; | |
3853 | ||
3854 | hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_KEY_CONFIG, false); | |
3855 | ||
3856 | req = (struct hclge_set_fd_key_config_cmd *)desc.data; | |
3857 | stage = &hdev->fd_cfg.key_cfg[stage_num]; | |
3858 | req->stage = stage_num; | |
3859 | req->key_select = stage->key_sel; | |
3860 | req->inner_sipv6_word_en = stage->inner_sipv6_word_en; | |
3861 | req->inner_dipv6_word_en = stage->inner_dipv6_word_en; | |
3862 | req->outer_sipv6_word_en = stage->outer_sipv6_word_en; | |
3863 | req->outer_dipv6_word_en = stage->outer_dipv6_word_en; | |
3864 | req->tuple_mask = cpu_to_le32(~stage->tuple_active); | |
3865 | req->meta_data_mask = cpu_to_le32(~stage->meta_data_active); | |
3866 | ||
3867 | ret = hclge_cmd_send(&hdev->hw, &desc, 1); | |
3868 | if (ret) | |
3869 | dev_err(&hdev->pdev->dev, "set fd key fail, ret=%d\n", ret); | |
3870 | ||
3871 | return ret; | |
3872 | } | |
3873 | ||
3874 | static int hclge_init_fd_config(struct hclge_dev *hdev) | |
3875 | { | |
3876 | #define LOW_2_WORDS 0x03 | |
3877 | struct hclge_fd_key_cfg *key_cfg; | |
3878 | int ret; | |
3879 | ||
3880 | if (!hnae3_dev_fd_supported(hdev)) | |
3881 | return 0; | |
3882 | ||
3883 | ret = hclge_get_fd_mode(hdev, &hdev->fd_cfg.fd_mode); | |
3884 | if (ret) | |
3885 | return ret; | |
3886 | ||
3887 | switch (hdev->fd_cfg.fd_mode) { | |
3888 | case HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1: | |
3889 | hdev->fd_cfg.max_key_length = MAX_KEY_LENGTH; | |
3890 | break; | |
3891 | case HCLGE_FD_MODE_DEPTH_4K_WIDTH_200B_STAGE_1: | |
3892 | hdev->fd_cfg.max_key_length = MAX_KEY_LENGTH / 2; | |
3893 | break; | |
3894 | default: | |
3895 | dev_err(&hdev->pdev->dev, | |
3896 | "Unsupported flow director mode %d\n", | |
3897 | hdev->fd_cfg.fd_mode); | |
3898 | return -EOPNOTSUPP; | |
3899 | } | |
3900 | ||
3901 | hdev->fd_cfg.fd_en = true; | |
3902 | hdev->fd_cfg.proto_support = | |
3903 | TCP_V4_FLOW | UDP_V4_FLOW | SCTP_V4_FLOW | TCP_V6_FLOW | | |
3904 | UDP_V6_FLOW | SCTP_V6_FLOW | IPV4_USER_FLOW | IPV6_USER_FLOW; | |
3905 | key_cfg = &hdev->fd_cfg.key_cfg[HCLGE_FD_STAGE_1]; | |
3906 | key_cfg->key_sel = HCLGE_FD_KEY_BASE_ON_TUPLE, | |
3907 | key_cfg->inner_sipv6_word_en = LOW_2_WORDS; | |
3908 | key_cfg->inner_dipv6_word_en = LOW_2_WORDS; | |
3909 | key_cfg->outer_sipv6_word_en = 0; | |
3910 | key_cfg->outer_dipv6_word_en = 0; | |
3911 | ||
3912 | key_cfg->tuple_active = BIT(INNER_VLAN_TAG_FST) | BIT(INNER_ETH_TYPE) | | |
3913 | BIT(INNER_IP_PROTO) | BIT(INNER_IP_TOS) | | |
3914 | BIT(INNER_SRC_IP) | BIT(INNER_DST_IP) | | |
3915 | BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT); | |
3916 | ||
3917 | /* If use max 400bit key, we can support tuples for ether type */ | |
3918 | if (hdev->fd_cfg.max_key_length == MAX_KEY_LENGTH) { | |
3919 | hdev->fd_cfg.proto_support |= ETHER_FLOW; | |
3920 | key_cfg->tuple_active |= | |
3921 | BIT(INNER_DST_MAC) | BIT(INNER_SRC_MAC); | |
3922 | } | |
3923 | ||
3924 | /* roce_type is used to filter roce frames | |
3925 | * dst_vport is used to specify the rule | |
3926 | */ | |
3927 | key_cfg->meta_data_active = BIT(ROCE_TYPE) | BIT(DST_VPORT); | |
3928 | ||
3929 | ret = hclge_get_fd_allocation(hdev, | |
3930 | &hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1], | |
3931 | &hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_2], | |
3932 | &hdev->fd_cfg.cnt_num[HCLGE_FD_STAGE_1], | |
3933 | &hdev->fd_cfg.cnt_num[HCLGE_FD_STAGE_2]); | |
3934 | if (ret) | |
3935 | return ret; | |
3936 | ||
3937 | return hclge_set_fd_key_config(hdev, HCLGE_FD_STAGE_1); | |
3938 | } | |
3939 | ||
7b829126 JS |
3940 | static int hclge_fd_tcam_config(struct hclge_dev *hdev, u8 stage, bool sel_x, |
3941 | int loc, u8 *key, bool is_add) | |
3942 | { | |
3943 | struct hclge_fd_tcam_config_1_cmd *req1; | |
3944 | struct hclge_fd_tcam_config_2_cmd *req2; | |
3945 | struct hclge_fd_tcam_config_3_cmd *req3; | |
3946 | struct hclge_desc desc[3]; | |
3947 | int ret; | |
3948 | ||
3949 | hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_FD_TCAM_OP, false); | |
3950 | desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT); | |
3951 | hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_FD_TCAM_OP, false); | |
3952 | desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT); | |
3953 | hclge_cmd_setup_basic_desc(&desc[2], HCLGE_OPC_FD_TCAM_OP, false); | |
3954 | ||
3955 | req1 = (struct hclge_fd_tcam_config_1_cmd *)desc[0].data; | |
3956 | req2 = (struct hclge_fd_tcam_config_2_cmd *)desc[1].data; | |
3957 | req3 = (struct hclge_fd_tcam_config_3_cmd *)desc[2].data; | |
3958 | ||
3959 | req1->stage = stage; | |
3960 | req1->xy_sel = sel_x ? 1 : 0; | |
3961 | hnae3_set_bit(req1->port_info, HCLGE_FD_EPORT_SW_EN_B, 0); | |
3962 | req1->index = cpu_to_le32(loc); | |
3963 | req1->entry_vld = sel_x ? is_add : 0; | |
3964 | ||
3965 | if (key) { | |
3966 | memcpy(req1->tcam_data, &key[0], sizeof(req1->tcam_data)); | |
3967 | memcpy(req2->tcam_data, &key[sizeof(req1->tcam_data)], | |
3968 | sizeof(req2->tcam_data)); | |
3969 | memcpy(req3->tcam_data, &key[sizeof(req1->tcam_data) + | |
3970 | sizeof(req2->tcam_data)], sizeof(req3->tcam_data)); | |
3971 | } | |
3972 | ||
3973 | ret = hclge_cmd_send(&hdev->hw, desc, 3); | |
3974 | if (ret) | |
3975 | dev_err(&hdev->pdev->dev, | |
3976 | "config tcam key fail, ret=%d\n", | |
3977 | ret); | |
3978 | ||
3979 | return ret; | |
3980 | } | |
3981 | ||
3982 | static int hclge_fd_ad_config(struct hclge_dev *hdev, u8 stage, int loc, | |
3983 | struct hclge_fd_ad_data *action) | |
3984 | { | |
3985 | struct hclge_fd_ad_config_cmd *req; | |
3986 | struct hclge_desc desc; | |
3987 | u64 ad_data = 0; | |
3988 | int ret; | |
3989 | ||
3990 | hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_AD_OP, false); | |
3991 | ||
3992 | req = (struct hclge_fd_ad_config_cmd *)desc.data; | |
3993 | req->index = cpu_to_le32(loc); | |
3994 | req->stage = stage; | |
3995 | ||
3996 | hnae3_set_bit(ad_data, HCLGE_FD_AD_WR_RULE_ID_B, | |
3997 | action->write_rule_id_to_bd); | |
3998 | hnae3_set_field(ad_data, HCLGE_FD_AD_RULE_ID_M, HCLGE_FD_AD_RULE_ID_S, | |
3999 | action->rule_id); | |
4000 | ad_data <<= 32; | |
4001 | hnae3_set_bit(ad_data, HCLGE_FD_AD_DROP_B, action->drop_packet); | |
4002 | hnae3_set_bit(ad_data, HCLGE_FD_AD_DIRECT_QID_B, | |
4003 | action->forward_to_direct_queue); | |
4004 | hnae3_set_field(ad_data, HCLGE_FD_AD_QID_M, HCLGE_FD_AD_QID_S, | |
4005 | action->queue_id); | |
4006 | hnae3_set_bit(ad_data, HCLGE_FD_AD_USE_COUNTER_B, action->use_counter); | |
4007 | hnae3_set_field(ad_data, HCLGE_FD_AD_COUNTER_NUM_M, | |
4008 | HCLGE_FD_AD_COUNTER_NUM_S, action->counter_id); | |
4009 | hnae3_set_bit(ad_data, HCLGE_FD_AD_NXT_STEP_B, action->use_next_stage); | |
4010 | hnae3_set_field(ad_data, HCLGE_FD_AD_NXT_KEY_M, HCLGE_FD_AD_NXT_KEY_S, | |
4011 | action->counter_id); | |
4012 | ||
4013 | req->ad_data = cpu_to_le64(ad_data); | |
4014 | ret = hclge_cmd_send(&hdev->hw, &desc, 1); | |
4015 | if (ret) | |
4016 | dev_err(&hdev->pdev->dev, "fd ad config fail, ret=%d\n", ret); | |
4017 | ||
4018 | return ret; | |
4019 | } | |
4020 | ||
4021 | static bool hclge_fd_convert_tuple(u32 tuple_bit, u8 *key_x, u8 *key_y, | |
4022 | struct hclge_fd_rule *rule) | |
4023 | { | |
4024 | u16 tmp_x_s, tmp_y_s; | |
4025 | u32 tmp_x_l, tmp_y_l; | |
4026 | int i; | |
4027 | ||
4028 | if (rule->unused_tuple & tuple_bit) | |
4029 | return true; | |
4030 | ||
4031 | switch (tuple_bit) { | |
4032 | case 0: | |
4033 | return false; | |
4034 | case BIT(INNER_DST_MAC): | |
4035 | for (i = 0; i < 6; i++) { | |
4036 | calc_x(key_x[5 - i], rule->tuples.dst_mac[i], | |
4037 | rule->tuples_mask.dst_mac[i]); | |
4038 | calc_y(key_y[5 - i], rule->tuples.dst_mac[i], | |
4039 | rule->tuples_mask.dst_mac[i]); | |
4040 | } | |
4041 | ||
4042 | return true; | |
4043 | case BIT(INNER_SRC_MAC): | |
4044 | for (i = 0; i < 6; i++) { | |
4045 | calc_x(key_x[5 - i], rule->tuples.src_mac[i], | |
4046 | rule->tuples.src_mac[i]); | |
4047 | calc_y(key_y[5 - i], rule->tuples.src_mac[i], | |
4048 | rule->tuples.src_mac[i]); | |
4049 | } | |
4050 | ||
4051 | return true; | |
4052 | case BIT(INNER_VLAN_TAG_FST): | |
4053 | calc_x(tmp_x_s, rule->tuples.vlan_tag1, | |
4054 | rule->tuples_mask.vlan_tag1); | |
4055 | calc_y(tmp_y_s, rule->tuples.vlan_tag1, | |
4056 | rule->tuples_mask.vlan_tag1); | |
4057 | *(__le16 *)key_x = cpu_to_le16(tmp_x_s); | |
4058 | *(__le16 *)key_y = cpu_to_le16(tmp_y_s); | |
4059 | ||
4060 | return true; | |
4061 | case BIT(INNER_ETH_TYPE): | |
4062 | calc_x(tmp_x_s, rule->tuples.ether_proto, | |
4063 | rule->tuples_mask.ether_proto); | |
4064 | calc_y(tmp_y_s, rule->tuples.ether_proto, | |
4065 | rule->tuples_mask.ether_proto); | |
4066 | *(__le16 *)key_x = cpu_to_le16(tmp_x_s); | |
4067 | *(__le16 *)key_y = cpu_to_le16(tmp_y_s); | |
4068 | ||
4069 | return true; | |
4070 | case BIT(INNER_IP_TOS): | |
4071 | calc_x(*key_x, rule->tuples.ip_tos, rule->tuples_mask.ip_tos); | |
4072 | calc_y(*key_y, rule->tuples.ip_tos, rule->tuples_mask.ip_tos); | |
4073 | ||
4074 | return true; | |
4075 | case BIT(INNER_IP_PROTO): | |
4076 | calc_x(*key_x, rule->tuples.ip_proto, | |
4077 | rule->tuples_mask.ip_proto); | |
4078 | calc_y(*key_y, rule->tuples.ip_proto, | |
4079 | rule->tuples_mask.ip_proto); | |
4080 | ||
4081 | return true; | |
4082 | case BIT(INNER_SRC_IP): | |
4083 | calc_x(tmp_x_l, rule->tuples.src_ip[3], | |
4084 | rule->tuples_mask.src_ip[3]); | |
4085 | calc_y(tmp_y_l, rule->tuples.src_ip[3], | |
4086 | rule->tuples_mask.src_ip[3]); | |
4087 | *(__le32 *)key_x = cpu_to_le32(tmp_x_l); | |
4088 | *(__le32 *)key_y = cpu_to_le32(tmp_y_l); | |
4089 | ||
4090 | return true; | |
4091 | case BIT(INNER_DST_IP): | |
4092 | calc_x(tmp_x_l, rule->tuples.dst_ip[3], | |
4093 | rule->tuples_mask.dst_ip[3]); | |
4094 | calc_y(tmp_y_l, rule->tuples.dst_ip[3], | |
4095 | rule->tuples_mask.dst_ip[3]); | |
4096 | *(__le32 *)key_x = cpu_to_le32(tmp_x_l); | |
4097 | *(__le32 *)key_y = cpu_to_le32(tmp_y_l); | |
4098 | ||
4099 | return true; | |
4100 | case BIT(INNER_SRC_PORT): | |
4101 | calc_x(tmp_x_s, rule->tuples.src_port, | |
4102 | rule->tuples_mask.src_port); | |
4103 | calc_y(tmp_y_s, rule->tuples.src_port, | |
4104 | rule->tuples_mask.src_port); | |
4105 | *(__le16 *)key_x = cpu_to_le16(tmp_x_s); | |
4106 | *(__le16 *)key_y = cpu_to_le16(tmp_y_s); | |
4107 | ||
4108 | return true; | |
4109 | case BIT(INNER_DST_PORT): | |
4110 | calc_x(tmp_x_s, rule->tuples.dst_port, | |
4111 | rule->tuples_mask.dst_port); | |
4112 | calc_y(tmp_y_s, rule->tuples.dst_port, | |
4113 | rule->tuples_mask.dst_port); | |
4114 | *(__le16 *)key_x = cpu_to_le16(tmp_x_s); | |
4115 | *(__le16 *)key_y = cpu_to_le16(tmp_y_s); | |
4116 | ||
4117 | return true; | |
4118 | default: | |
4119 | return false; | |
4120 | } | |
4121 | } | |
4122 | ||
4123 | static u32 hclge_get_port_number(enum HLCGE_PORT_TYPE port_type, u8 pf_id, | |
4124 | u8 vf_id, u8 network_port_id) | |
4125 | { | |
4126 | u32 port_number = 0; | |
4127 | ||
4128 | if (port_type == HOST_PORT) { | |
4129 | hnae3_set_field(port_number, HCLGE_PF_ID_M, HCLGE_PF_ID_S, | |
4130 | pf_id); | |
4131 | hnae3_set_field(port_number, HCLGE_VF_ID_M, HCLGE_VF_ID_S, | |
4132 | vf_id); | |
4133 | hnae3_set_bit(port_number, HCLGE_PORT_TYPE_B, HOST_PORT); | |
4134 | } else { | |
4135 | hnae3_set_field(port_number, HCLGE_NETWORK_PORT_ID_M, | |
4136 | HCLGE_NETWORK_PORT_ID_S, network_port_id); | |
4137 | hnae3_set_bit(port_number, HCLGE_PORT_TYPE_B, NETWORK_PORT); | |
4138 | } | |
4139 | ||
4140 | return port_number; | |
4141 | } | |
4142 | ||
4143 | static void hclge_fd_convert_meta_data(struct hclge_fd_key_cfg *key_cfg, | |
4144 | __le32 *key_x, __le32 *key_y, | |
4145 | struct hclge_fd_rule *rule) | |
4146 | { | |
4147 | u32 tuple_bit, meta_data = 0, tmp_x, tmp_y, port_number; | |
4148 | u8 cur_pos = 0, tuple_size, shift_bits; | |
4149 | int i; | |
4150 | ||
4151 | for (i = 0; i < MAX_META_DATA; i++) { | |
4152 | tuple_size = meta_data_key_info[i].key_length; | |
4153 | tuple_bit = key_cfg->meta_data_active & BIT(i); | |
4154 | ||
4155 | switch (tuple_bit) { | |
4156 | case BIT(ROCE_TYPE): | |
4157 | hnae3_set_bit(meta_data, cur_pos, NIC_PACKET); | |
4158 | cur_pos += tuple_size; | |
4159 | break; | |
4160 | case BIT(DST_VPORT): | |
4161 | port_number = hclge_get_port_number(HOST_PORT, 0, | |
4162 | rule->vf_id, 0); | |
4163 | hnae3_set_field(meta_data, | |
4164 | GENMASK(cur_pos + tuple_size, cur_pos), | |
4165 | cur_pos, port_number); | |
4166 | cur_pos += tuple_size; | |
4167 | break; | |
4168 | default: | |
4169 | break; | |
4170 | } | |
4171 | } | |
4172 | ||
4173 | calc_x(tmp_x, meta_data, 0xFFFFFFFF); | |
4174 | calc_y(tmp_y, meta_data, 0xFFFFFFFF); | |
4175 | shift_bits = sizeof(meta_data) * 8 - cur_pos; | |
4176 | ||
4177 | *key_x = cpu_to_le32(tmp_x << shift_bits); | |
4178 | *key_y = cpu_to_le32(tmp_y << shift_bits); | |
4179 | } | |
4180 | ||
4181 | /* A complete key is combined with meta data key and tuple key. | |
4182 | * Meta data key is stored at the MSB region, and tuple key is stored at | |
4183 | * the LSB region, unused bits will be filled 0. | |
4184 | */ | |
4185 | static int hclge_config_key(struct hclge_dev *hdev, u8 stage, | |
4186 | struct hclge_fd_rule *rule) | |
4187 | { | |
4188 | struct hclge_fd_key_cfg *key_cfg = &hdev->fd_cfg.key_cfg[stage]; | |
4189 | u8 key_x[MAX_KEY_BYTES], key_y[MAX_KEY_BYTES]; | |
4190 | u8 *cur_key_x, *cur_key_y; | |
4191 | int i, ret, tuple_size; | |
4192 | u8 meta_data_region; | |
4193 | ||
4194 | memset(key_x, 0, sizeof(key_x)); | |
4195 | memset(key_y, 0, sizeof(key_y)); | |
4196 | cur_key_x = key_x; | |
4197 | cur_key_y = key_y; | |
4198 | ||
4199 | for (i = 0 ; i < MAX_TUPLE; i++) { | |
4200 | bool tuple_valid; | |
4201 | u32 check_tuple; | |
4202 | ||
4203 | tuple_size = tuple_key_info[i].key_length / 8; | |
4204 | check_tuple = key_cfg->tuple_active & BIT(i); | |
4205 | ||
4206 | tuple_valid = hclge_fd_convert_tuple(check_tuple, cur_key_x, | |
4207 | cur_key_y, rule); | |
4208 | if (tuple_valid) { | |
4209 | cur_key_x += tuple_size; | |
4210 | cur_key_y += tuple_size; | |
4211 | } | |
4212 | } | |
4213 | ||
4214 | meta_data_region = hdev->fd_cfg.max_key_length / 8 - | |
4215 | MAX_META_DATA_LENGTH / 8; | |
4216 | ||
4217 | hclge_fd_convert_meta_data(key_cfg, | |
4218 | (__le32 *)(key_x + meta_data_region), | |
4219 | (__le32 *)(key_y + meta_data_region), | |
4220 | rule); | |
4221 | ||
4222 | ret = hclge_fd_tcam_config(hdev, stage, false, rule->location, key_y, | |
4223 | true); | |
4224 | if (ret) { | |
4225 | dev_err(&hdev->pdev->dev, | |
4226 | "fd key_y config fail, loc=%d, ret=%d\n", | |
4227 | rule->queue_id, ret); | |
4228 | return ret; | |
4229 | } | |
4230 | ||
4231 | ret = hclge_fd_tcam_config(hdev, stage, true, rule->location, key_x, | |
4232 | true); | |
4233 | if (ret) | |
4234 | dev_err(&hdev->pdev->dev, | |
4235 | "fd key_x config fail, loc=%d, ret=%d\n", | |
4236 | rule->queue_id, ret); | |
4237 | return ret; | |
4238 | } | |
4239 | ||
4240 | static int hclge_config_action(struct hclge_dev *hdev, u8 stage, | |
4241 | struct hclge_fd_rule *rule) | |
4242 | { | |
4243 | struct hclge_fd_ad_data ad_data; | |
4244 | ||
4245 | ad_data.ad_id = rule->location; | |
4246 | ||
4247 | if (rule->action == HCLGE_FD_ACTION_DROP_PACKET) { | |
4248 | ad_data.drop_packet = true; | |
4249 | ad_data.forward_to_direct_queue = false; | |
4250 | ad_data.queue_id = 0; | |
4251 | } else { | |
4252 | ad_data.drop_packet = false; | |
4253 | ad_data.forward_to_direct_queue = true; | |
4254 | ad_data.queue_id = rule->queue_id; | |
4255 | } | |
4256 | ||
4257 | ad_data.use_counter = false; | |
4258 | ad_data.counter_id = 0; | |
4259 | ||
4260 | ad_data.use_next_stage = false; | |
4261 | ad_data.next_input_key = 0; | |
4262 | ||
4263 | ad_data.write_rule_id_to_bd = true; | |
4264 | ad_data.rule_id = rule->location; | |
4265 | ||
4266 | return hclge_fd_ad_config(hdev, stage, ad_data.ad_id, &ad_data); | |
4267 | } | |
4268 | ||
3ca8e27c JS |
4269 | static int hclge_fd_check_spec(struct hclge_dev *hdev, |
4270 | struct ethtool_rx_flow_spec *fs, u32 *unused) | |
4271 | { | |
4272 | struct ethtool_tcpip4_spec *tcp_ip4_spec; | |
4273 | struct ethtool_usrip4_spec *usr_ip4_spec; | |
4274 | struct ethtool_tcpip6_spec *tcp_ip6_spec; | |
4275 | struct ethtool_usrip6_spec *usr_ip6_spec; | |
4276 | struct ethhdr *ether_spec; | |
4277 | ||
4278 | if (fs->location >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]) | |
4279 | return -EINVAL; | |
4280 | ||
4281 | if (!(fs->flow_type & hdev->fd_cfg.proto_support)) | |
4282 | return -EOPNOTSUPP; | |
4283 | ||
4284 | if ((fs->flow_type & FLOW_EXT) && | |
4285 | (fs->h_ext.data[0] != 0 || fs->h_ext.data[1] != 0)) { | |
4286 | dev_err(&hdev->pdev->dev, "user-def bytes are not supported\n"); | |
4287 | return -EOPNOTSUPP; | |
4288 | } | |
4289 | ||
4290 | switch (fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) { | |
4291 | case SCTP_V4_FLOW: | |
4292 | case TCP_V4_FLOW: | |
4293 | case UDP_V4_FLOW: | |
4294 | tcp_ip4_spec = &fs->h_u.tcp_ip4_spec; | |
4295 | *unused |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC); | |
4296 | ||
4297 | if (!tcp_ip4_spec->ip4src) | |
4298 | *unused |= BIT(INNER_SRC_IP); | |
4299 | ||
4300 | if (!tcp_ip4_spec->ip4dst) | |
4301 | *unused |= BIT(INNER_DST_IP); | |
4302 | ||
4303 | if (!tcp_ip4_spec->psrc) | |
4304 | *unused |= BIT(INNER_SRC_PORT); | |
4305 | ||
4306 | if (!tcp_ip4_spec->pdst) | |
4307 | *unused |= BIT(INNER_DST_PORT); | |
4308 | ||
4309 | if (!tcp_ip4_spec->tos) | |
4310 | *unused |= BIT(INNER_IP_TOS); | |
4311 | ||
4312 | break; | |
4313 | case IP_USER_FLOW: | |
4314 | usr_ip4_spec = &fs->h_u.usr_ip4_spec; | |
4315 | *unused |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) | | |
4316 | BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT); | |
4317 | ||
4318 | if (!usr_ip4_spec->ip4src) | |
4319 | *unused |= BIT(INNER_SRC_IP); | |
4320 | ||
4321 | if (!usr_ip4_spec->ip4dst) | |
4322 | *unused |= BIT(INNER_DST_IP); | |
4323 | ||
4324 | if (!usr_ip4_spec->tos) | |
4325 | *unused |= BIT(INNER_IP_TOS); | |
4326 | ||
4327 | if (!usr_ip4_spec->proto) | |
4328 | *unused |= BIT(INNER_IP_PROTO); | |
4329 | ||
4330 | if (usr_ip4_spec->l4_4_bytes) | |
4331 | return -EOPNOTSUPP; | |
4332 | ||
4333 | if (usr_ip4_spec->ip_ver != ETH_RX_NFC_IP4) | |
4334 | return -EOPNOTSUPP; | |
4335 | ||
4336 | break; | |
4337 | case SCTP_V6_FLOW: | |
4338 | case TCP_V6_FLOW: | |
4339 | case UDP_V6_FLOW: | |
4340 | tcp_ip6_spec = &fs->h_u.tcp_ip6_spec; | |
4341 | *unused |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) | | |
4342 | BIT(INNER_IP_TOS); | |
4343 | ||
4344 | if (!tcp_ip6_spec->ip6src[0] && !tcp_ip6_spec->ip6src[1] && | |
4345 | !tcp_ip6_spec->ip6src[2] && !tcp_ip6_spec->ip6src[3]) | |
4346 | *unused |= BIT(INNER_SRC_IP); | |
4347 | ||
4348 | if (!tcp_ip6_spec->ip6dst[0] && !tcp_ip6_spec->ip6dst[1] && | |
4349 | !tcp_ip6_spec->ip6dst[2] && !tcp_ip6_spec->ip6dst[3]) | |
4350 | *unused |= BIT(INNER_DST_IP); | |
4351 | ||
4352 | if (!tcp_ip6_spec->psrc) | |
4353 | *unused |= BIT(INNER_SRC_PORT); | |
4354 | ||
4355 | if (!tcp_ip6_spec->pdst) | |
4356 | *unused |= BIT(INNER_DST_PORT); | |
4357 | ||
4358 | if (tcp_ip6_spec->tclass) | |
4359 | return -EOPNOTSUPP; | |
4360 | ||
4361 | break; | |
4362 | case IPV6_USER_FLOW: | |
4363 | usr_ip6_spec = &fs->h_u.usr_ip6_spec; | |
4364 | *unused |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) | | |
4365 | BIT(INNER_IP_TOS) | BIT(INNER_SRC_PORT) | | |
4366 | BIT(INNER_DST_PORT); | |
4367 | ||
4368 | if (!usr_ip6_spec->ip6src[0] && !usr_ip6_spec->ip6src[1] && | |
4369 | !usr_ip6_spec->ip6src[2] && !usr_ip6_spec->ip6src[3]) | |
4370 | *unused |= BIT(INNER_SRC_IP); | |
4371 | ||
4372 | if (!usr_ip6_spec->ip6dst[0] && !usr_ip6_spec->ip6dst[1] && | |
4373 | !usr_ip6_spec->ip6dst[2] && !usr_ip6_spec->ip6dst[3]) | |
4374 | *unused |= BIT(INNER_DST_IP); | |
4375 | ||
4376 | if (!usr_ip6_spec->l4_proto) | |
4377 | *unused |= BIT(INNER_IP_PROTO); | |
4378 | ||
4379 | if (usr_ip6_spec->tclass) | |
4380 | return -EOPNOTSUPP; | |
4381 | ||
4382 | if (usr_ip6_spec->l4_4_bytes) | |
4383 | return -EOPNOTSUPP; | |
4384 | ||
4385 | break; | |
4386 | case ETHER_FLOW: | |
4387 | ether_spec = &fs->h_u.ether_spec; | |
4388 | *unused |= BIT(INNER_SRC_IP) | BIT(INNER_DST_IP) | | |
4389 | BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT) | | |
4390 | BIT(INNER_IP_TOS) | BIT(INNER_IP_PROTO); | |
4391 | ||
4392 | if (is_zero_ether_addr(ether_spec->h_source)) | |
4393 | *unused |= BIT(INNER_SRC_MAC); | |
4394 | ||
4395 | if (is_zero_ether_addr(ether_spec->h_dest)) | |
4396 | *unused |= BIT(INNER_DST_MAC); | |
4397 | ||
4398 | if (!ether_spec->h_proto) | |
4399 | *unused |= BIT(INNER_ETH_TYPE); | |
4400 | ||
4401 | break; | |
4402 | default: | |
4403 | return -EOPNOTSUPP; | |
4404 | } | |
4405 | ||
4406 | if ((fs->flow_type & FLOW_EXT)) { | |
4407 | if (fs->h_ext.vlan_etype) | |
4408 | return -EOPNOTSUPP; | |
4409 | if (!fs->h_ext.vlan_tci) | |
4410 | *unused |= BIT(INNER_VLAN_TAG_FST); | |
4411 | ||
4412 | if (fs->m_ext.vlan_tci) { | |
4413 | if (be16_to_cpu(fs->h_ext.vlan_tci) >= VLAN_N_VID) | |
4414 | return -EINVAL; | |
4415 | } | |
4416 | } else { | |
4417 | *unused |= BIT(INNER_VLAN_TAG_FST); | |
4418 | } | |
4419 | ||
4420 | if (fs->flow_type & FLOW_MAC_EXT) { | |
4421 | if (!(hdev->fd_cfg.proto_support & ETHER_FLOW)) | |
4422 | return -EOPNOTSUPP; | |
4423 | ||
4424 | if (is_zero_ether_addr(fs->h_ext.h_dest)) | |
4425 | *unused |= BIT(INNER_DST_MAC); | |
4426 | else | |
4427 | *unused &= ~(BIT(INNER_DST_MAC)); | |
4428 | } | |
4429 | ||
4430 | return 0; | |
4431 | } | |
4432 | ||
4433 | static bool hclge_fd_rule_exist(struct hclge_dev *hdev, u16 location) | |
4434 | { | |
4435 | struct hclge_fd_rule *rule = NULL; | |
4436 | struct hlist_node *node2; | |
4437 | ||
4438 | hlist_for_each_entry_safe(rule, node2, &hdev->fd_rule_list, rule_node) { | |
4439 | if (rule->location >= location) | |
4440 | break; | |
4441 | } | |
4442 | ||
4443 | return rule && rule->location == location; | |
4444 | } | |
4445 | ||
4446 | static int hclge_fd_update_rule_list(struct hclge_dev *hdev, | |
4447 | struct hclge_fd_rule *new_rule, | |
4448 | u16 location, | |
4449 | bool is_add) | |
4450 | { | |
4451 | struct hclge_fd_rule *rule = NULL, *parent = NULL; | |
4452 | struct hlist_node *node2; | |
4453 | ||
4454 | if (is_add && !new_rule) | |
4455 | return -EINVAL; | |
4456 | ||
4457 | hlist_for_each_entry_safe(rule, node2, | |
4458 | &hdev->fd_rule_list, rule_node) { | |
4459 | if (rule->location >= location) | |
4460 | break; | |
4461 | parent = rule; | |
4462 | } | |
4463 | ||
4464 | if (rule && rule->location == location) { | |
4465 | hlist_del(&rule->rule_node); | |
4466 | kfree(rule); | |
4467 | hdev->hclge_fd_rule_num--; | |
4468 | ||
4469 | if (!is_add) | |
4470 | return 0; | |
4471 | ||
4472 | } else if (!is_add) { | |
4473 | dev_err(&hdev->pdev->dev, | |
4474 | "delete fail, rule %d is inexistent\n", | |
4475 | location); | |
4476 | return -EINVAL; | |
4477 | } | |
4478 | ||
4479 | INIT_HLIST_NODE(&new_rule->rule_node); | |
4480 | ||
4481 | if (parent) | |
4482 | hlist_add_behind(&new_rule->rule_node, &parent->rule_node); | |
4483 | else | |
4484 | hlist_add_head(&new_rule->rule_node, &hdev->fd_rule_list); | |
4485 | ||
4486 | hdev->hclge_fd_rule_num++; | |
4487 | ||
4488 | return 0; | |
4489 | } | |
4490 | ||
4491 | static int hclge_fd_get_tuple(struct hclge_dev *hdev, | |
4492 | struct ethtool_rx_flow_spec *fs, | |
4493 | struct hclge_fd_rule *rule) | |
4494 | { | |
4495 | u32 flow_type = fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT); | |
4496 | ||
4497 | switch (flow_type) { | |
4498 | case SCTP_V4_FLOW: | |
4499 | case TCP_V4_FLOW: | |
4500 | case UDP_V4_FLOW: | |
4501 | rule->tuples.src_ip[3] = | |
4502 | be32_to_cpu(fs->h_u.tcp_ip4_spec.ip4src); | |
4503 | rule->tuples_mask.src_ip[3] = | |
4504 | be32_to_cpu(fs->m_u.tcp_ip4_spec.ip4src); | |
4505 | ||
4506 | rule->tuples.dst_ip[3] = | |
4507 | be32_to_cpu(fs->h_u.tcp_ip4_spec.ip4dst); | |
4508 | rule->tuples_mask.dst_ip[3] = | |
4509 | be32_to_cpu(fs->m_u.tcp_ip4_spec.ip4dst); | |
4510 | ||
4511 | rule->tuples.src_port = be16_to_cpu(fs->h_u.tcp_ip4_spec.psrc); | |
4512 | rule->tuples_mask.src_port = | |
4513 | be16_to_cpu(fs->m_u.tcp_ip4_spec.psrc); | |
4514 | ||
4515 | rule->tuples.dst_port = be16_to_cpu(fs->h_u.tcp_ip4_spec.pdst); | |
4516 | rule->tuples_mask.dst_port = | |
4517 | be16_to_cpu(fs->m_u.tcp_ip4_spec.pdst); | |
4518 | ||
4519 | rule->tuples.ip_tos = fs->h_u.tcp_ip4_spec.tos; | |
4520 | rule->tuples_mask.ip_tos = fs->m_u.tcp_ip4_spec.tos; | |
4521 | ||
4522 | rule->tuples.ether_proto = ETH_P_IP; | |
4523 | rule->tuples_mask.ether_proto = 0xFFFF; | |
4524 | ||
4525 | break; | |
4526 | case IP_USER_FLOW: | |
4527 | rule->tuples.src_ip[3] = | |
4528 | be32_to_cpu(fs->h_u.usr_ip4_spec.ip4src); | |
4529 | rule->tuples_mask.src_ip[3] = | |
4530 | be32_to_cpu(fs->m_u.usr_ip4_spec.ip4src); | |
4531 | ||
4532 | rule->tuples.dst_ip[3] = | |
4533 | be32_to_cpu(fs->h_u.usr_ip4_spec.ip4dst); | |
4534 | rule->tuples_mask.dst_ip[3] = | |
4535 | be32_to_cpu(fs->m_u.usr_ip4_spec.ip4dst); | |
4536 | ||
4537 | rule->tuples.ip_tos = fs->h_u.usr_ip4_spec.tos; | |
4538 | rule->tuples_mask.ip_tos = fs->m_u.usr_ip4_spec.tos; | |
4539 | ||
4540 | rule->tuples.ip_proto = fs->h_u.usr_ip4_spec.proto; | |
4541 | rule->tuples_mask.ip_proto = fs->m_u.usr_ip4_spec.proto; | |
4542 | ||
4543 | rule->tuples.ether_proto = ETH_P_IP; | |
4544 | rule->tuples_mask.ether_proto = 0xFFFF; | |
4545 | ||
4546 | break; | |
4547 | case SCTP_V6_FLOW: | |
4548 | case TCP_V6_FLOW: | |
4549 | case UDP_V6_FLOW: | |
4550 | be32_to_cpu_array(rule->tuples.src_ip, | |
4551 | fs->h_u.tcp_ip6_spec.ip6src, 4); | |
4552 | be32_to_cpu_array(rule->tuples_mask.src_ip, | |
4553 | fs->m_u.tcp_ip6_spec.ip6src, 4); | |
4554 | ||
4555 | be32_to_cpu_array(rule->tuples.dst_ip, | |
4556 | fs->h_u.tcp_ip6_spec.ip6dst, 4); | |
4557 | be32_to_cpu_array(rule->tuples_mask.dst_ip, | |
4558 | fs->m_u.tcp_ip6_spec.ip6dst, 4); | |
4559 | ||
4560 | rule->tuples.src_port = be16_to_cpu(fs->h_u.tcp_ip6_spec.psrc); | |
4561 | rule->tuples_mask.src_port = | |
4562 | be16_to_cpu(fs->m_u.tcp_ip6_spec.psrc); | |
4563 | ||
4564 | rule->tuples.dst_port = be16_to_cpu(fs->h_u.tcp_ip6_spec.pdst); | |
4565 | rule->tuples_mask.dst_port = | |
4566 | be16_to_cpu(fs->m_u.tcp_ip6_spec.pdst); | |
4567 | ||
4568 | rule->tuples.ether_proto = ETH_P_IPV6; | |
4569 | rule->tuples_mask.ether_proto = 0xFFFF; | |
4570 | ||
4571 | break; | |
4572 | case IPV6_USER_FLOW: | |
4573 | be32_to_cpu_array(rule->tuples.src_ip, | |
4574 | fs->h_u.usr_ip6_spec.ip6src, 4); | |
4575 | be32_to_cpu_array(rule->tuples_mask.src_ip, | |
4576 | fs->m_u.usr_ip6_spec.ip6src, 4); | |
4577 | ||
4578 | be32_to_cpu_array(rule->tuples.dst_ip, | |
4579 | fs->h_u.usr_ip6_spec.ip6dst, 4); | |
4580 | be32_to_cpu_array(rule->tuples_mask.dst_ip, | |
4581 | fs->m_u.usr_ip6_spec.ip6dst, 4); | |
4582 | ||
4583 | rule->tuples.ip_proto = fs->h_u.usr_ip6_spec.l4_proto; | |
4584 | rule->tuples_mask.ip_proto = fs->m_u.usr_ip6_spec.l4_proto; | |
4585 | ||
4586 | rule->tuples.ether_proto = ETH_P_IPV6; | |
4587 | rule->tuples_mask.ether_proto = 0xFFFF; | |
4588 | ||
4589 | break; | |
4590 | case ETHER_FLOW: | |
4591 | ether_addr_copy(rule->tuples.src_mac, | |
4592 | fs->h_u.ether_spec.h_source); | |
4593 | ether_addr_copy(rule->tuples_mask.src_mac, | |
4594 | fs->m_u.ether_spec.h_source); | |
4595 | ||
4596 | ether_addr_copy(rule->tuples.dst_mac, | |
4597 | fs->h_u.ether_spec.h_dest); | |
4598 | ether_addr_copy(rule->tuples_mask.dst_mac, | |
4599 | fs->m_u.ether_spec.h_dest); | |
4600 | ||
4601 | rule->tuples.ether_proto = | |
4602 | be16_to_cpu(fs->h_u.ether_spec.h_proto); | |
4603 | rule->tuples_mask.ether_proto = | |
4604 | be16_to_cpu(fs->m_u.ether_spec.h_proto); | |
4605 | ||
4606 | break; | |
4607 | default: | |
4608 | return -EOPNOTSUPP; | |
4609 | } | |
4610 | ||
4611 | switch (flow_type) { | |
4612 | case SCTP_V4_FLOW: | |
4613 | case SCTP_V6_FLOW: | |
4614 | rule->tuples.ip_proto = IPPROTO_SCTP; | |
4615 | rule->tuples_mask.ip_proto = 0xFF; | |
4616 | break; | |
4617 | case TCP_V4_FLOW: | |
4618 | case TCP_V6_FLOW: | |
4619 | rule->tuples.ip_proto = IPPROTO_TCP; | |
4620 | rule->tuples_mask.ip_proto = 0xFF; | |
4621 | break; | |
4622 | case UDP_V4_FLOW: | |
4623 | case UDP_V6_FLOW: | |
4624 | rule->tuples.ip_proto = IPPROTO_UDP; | |
4625 | rule->tuples_mask.ip_proto = 0xFF; | |
4626 | break; | |
4627 | default: | |
4628 | break; | |
4629 | } | |
4630 | ||
4631 | if ((fs->flow_type & FLOW_EXT)) { | |
4632 | rule->tuples.vlan_tag1 = be16_to_cpu(fs->h_ext.vlan_tci); | |
4633 | rule->tuples_mask.vlan_tag1 = be16_to_cpu(fs->m_ext.vlan_tci); | |
4634 | } | |
4635 | ||
4636 | if (fs->flow_type & FLOW_MAC_EXT) { | |
4637 | ether_addr_copy(rule->tuples.dst_mac, fs->h_ext.h_dest); | |
4638 | ether_addr_copy(rule->tuples_mask.dst_mac, fs->m_ext.h_dest); | |
4639 | } | |
4640 | ||
4641 | return 0; | |
4642 | } | |
4643 | ||
4644 | static int hclge_add_fd_entry(struct hnae3_handle *handle, | |
4645 | struct ethtool_rxnfc *cmd) | |
4646 | { | |
4647 | struct hclge_vport *vport = hclge_get_vport(handle); | |
4648 | struct hclge_dev *hdev = vport->back; | |
4649 | u16 dst_vport_id = 0, q_index = 0; | |
4650 | struct ethtool_rx_flow_spec *fs; | |
4651 | struct hclge_fd_rule *rule; | |
4652 | u32 unused = 0; | |
4653 | u8 action; | |
4654 | int ret; | |
4655 | ||
4656 | if (!hnae3_dev_fd_supported(hdev)) | |
4657 | return -EOPNOTSUPP; | |
4658 | ||
4659 | if (!hdev->fd_cfg.fd_en) { | |
4660 | dev_warn(&hdev->pdev->dev, | |
4661 | "Please enable flow director first\n"); | |
4662 | return -EOPNOTSUPP; | |
4663 | } | |
4664 | ||
4665 | fs = (struct ethtool_rx_flow_spec *)&cmd->fs; | |
4666 | ||
4667 | ret = hclge_fd_check_spec(hdev, fs, &unused); | |
4668 | if (ret) { | |
4669 | dev_err(&hdev->pdev->dev, "Check fd spec failed\n"); | |
4670 | return ret; | |
4671 | } | |
4672 | ||
4673 | if (fs->ring_cookie == RX_CLS_FLOW_DISC) { | |
4674 | action = HCLGE_FD_ACTION_DROP_PACKET; | |
4675 | } else { | |
4676 | u32 ring = ethtool_get_flow_spec_ring(fs->ring_cookie); | |
4677 | u8 vf = ethtool_get_flow_spec_ring_vf(fs->ring_cookie); | |
4678 | u16 tqps; | |
4679 | ||
9eb3eb33 JS |
4680 | if (vf > hdev->num_req_vfs) { |
4681 | dev_err(&hdev->pdev->dev, | |
4682 | "Error: vf id (%d) > max vf num (%d)\n", | |
4683 | vf, hdev->num_req_vfs); | |
4684 | return -EINVAL; | |
4685 | } | |
4686 | ||
3ca8e27c JS |
4687 | dst_vport_id = vf ? hdev->vport[vf].vport_id : vport->vport_id; |
4688 | tqps = vf ? hdev->vport[vf].alloc_tqps : vport->alloc_tqps; | |
4689 | ||
4690 | if (ring >= tqps) { | |
4691 | dev_err(&hdev->pdev->dev, | |
4692 | "Error: queue id (%d) > max tqp num (%d)\n", | |
4693 | ring, tqps - 1); | |
4694 | return -EINVAL; | |
4695 | } | |
4696 | ||
3ca8e27c JS |
4697 | action = HCLGE_FD_ACTION_ACCEPT_PACKET; |
4698 | q_index = ring; | |
4699 | } | |
4700 | ||
4701 | rule = kzalloc(sizeof(*rule), GFP_KERNEL); | |
4702 | if (!rule) | |
4703 | return -ENOMEM; | |
4704 | ||
4705 | ret = hclge_fd_get_tuple(hdev, fs, rule); | |
4706 | if (ret) | |
4707 | goto free_rule; | |
4708 | ||
4709 | rule->flow_type = fs->flow_type; | |
4710 | ||
4711 | rule->location = fs->location; | |
4712 | rule->unused_tuple = unused; | |
4713 | rule->vf_id = dst_vport_id; | |
4714 | rule->queue_id = q_index; | |
4715 | rule->action = action; | |
4716 | ||
4717 | ret = hclge_config_action(hdev, HCLGE_FD_STAGE_1, rule); | |
4718 | if (ret) | |
4719 | goto free_rule; | |
4720 | ||
4721 | ret = hclge_config_key(hdev, HCLGE_FD_STAGE_1, rule); | |
4722 | if (ret) | |
4723 | goto free_rule; | |
4724 | ||
4725 | ret = hclge_fd_update_rule_list(hdev, rule, fs->location, true); | |
4726 | if (ret) | |
4727 | goto free_rule; | |
4728 | ||
4729 | return ret; | |
4730 | ||
4731 | free_rule: | |
4732 | kfree(rule); | |
4733 | return ret; | |
4734 | } | |
4735 | ||
4736 | static int hclge_del_fd_entry(struct hnae3_handle *handle, | |
4737 | struct ethtool_rxnfc *cmd) | |
4738 | { | |
4739 | struct hclge_vport *vport = hclge_get_vport(handle); | |
4740 | struct hclge_dev *hdev = vport->back; | |
4741 | struct ethtool_rx_flow_spec *fs; | |
4742 | int ret; | |
4743 | ||
4744 | if (!hnae3_dev_fd_supported(hdev)) | |
4745 | return -EOPNOTSUPP; | |
4746 | ||
4747 | fs = (struct ethtool_rx_flow_spec *)&cmd->fs; | |
4748 | ||
4749 | if (fs->location >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]) | |
4750 | return -EINVAL; | |
4751 | ||
4752 | if (!hclge_fd_rule_exist(hdev, fs->location)) { | |
4753 | dev_err(&hdev->pdev->dev, | |
4754 | "Delete fail, rule %d is inexistent\n", | |
4755 | fs->location); | |
4756 | return -ENOENT; | |
4757 | } | |
4758 | ||
4759 | ret = hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true, | |
4760 | fs->location, NULL, false); | |
4761 | if (ret) | |
4762 | return ret; | |
4763 | ||
4764 | return hclge_fd_update_rule_list(hdev, NULL, fs->location, | |
4765 | false); | |
4766 | } | |
4767 | ||
7ce98982 JS |
4768 | static void hclge_del_all_fd_entries(struct hnae3_handle *handle, |
4769 | bool clear_list) | |
4770 | { | |
4771 | struct hclge_vport *vport = hclge_get_vport(handle); | |
4772 | struct hclge_dev *hdev = vport->back; | |
4773 | struct hclge_fd_rule *rule; | |
4774 | struct hlist_node *node; | |
4775 | ||
4776 | if (!hnae3_dev_fd_supported(hdev)) | |
4777 | return; | |
4778 | ||
4779 | if (clear_list) { | |
4780 | hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, | |
4781 | rule_node) { | |
4782 | hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true, | |
4783 | rule->location, NULL, false); | |
4784 | hlist_del(&rule->rule_node); | |
4785 | kfree(rule); | |
4786 | hdev->hclge_fd_rule_num--; | |
4787 | } | |
4788 | } else { | |
4789 | hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, | |
4790 | rule_node) | |
4791 | hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true, | |
4792 | rule->location, NULL, false); | |
4793 | } | |
4794 | } | |
4795 | ||
4796 | static int hclge_restore_fd_entries(struct hnae3_handle *handle) | |
4797 | { | |
4798 | struct hclge_vport *vport = hclge_get_vport(handle); | |
4799 | struct hclge_dev *hdev = vport->back; | |
4800 | struct hclge_fd_rule *rule; | |
4801 | struct hlist_node *node; | |
4802 | int ret; | |
4803 | ||
1afdb53a HT |
4804 | /* Return ok here, because reset error handling will check this |
4805 | * return value. If error is returned here, the reset process will | |
4806 | * fail. | |
4807 | */ | |
7ce98982 | 4808 | if (!hnae3_dev_fd_supported(hdev)) |
1afdb53a | 4809 | return 0; |
7ce98982 | 4810 | |
626cb23b JS |
4811 | /* if fd is disabled, should not restore it when reset */ |
4812 | if (!hdev->fd_cfg.fd_en) | |
4813 | return 0; | |
4814 | ||
7ce98982 JS |
4815 | hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) { |
4816 | ret = hclge_config_action(hdev, HCLGE_FD_STAGE_1, rule); | |
4817 | if (!ret) | |
4818 | ret = hclge_config_key(hdev, HCLGE_FD_STAGE_1, rule); | |
4819 | ||
4820 | if (ret) { | |
4821 | dev_warn(&hdev->pdev->dev, | |
4822 | "Restore rule %d failed, remove it\n", | |
4823 | rule->location); | |
4824 | hlist_del(&rule->rule_node); | |
4825 | kfree(rule); | |
4826 | hdev->hclge_fd_rule_num--; | |
4827 | } | |
4828 | } | |
4829 | return 0; | |
4830 | } | |
4831 | ||
295043a7 JS |
4832 | static int hclge_get_fd_rule_cnt(struct hnae3_handle *handle, |
4833 | struct ethtool_rxnfc *cmd) | |
4834 | { | |
4835 | struct hclge_vport *vport = hclge_get_vport(handle); | |
4836 | struct hclge_dev *hdev = vport->back; | |
4837 | ||
4838 | if (!hnae3_dev_fd_supported(hdev)) | |
4839 | return -EOPNOTSUPP; | |
4840 | ||
4841 | cmd->rule_cnt = hdev->hclge_fd_rule_num; | |
4842 | cmd->data = hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]; | |
4843 | ||
4844 | return 0; | |
4845 | } | |
4846 | ||
4847 | static int hclge_get_fd_rule_info(struct hnae3_handle *handle, | |
4848 | struct ethtool_rxnfc *cmd) | |
4849 | { | |
4850 | struct hclge_vport *vport = hclge_get_vport(handle); | |
4851 | struct hclge_fd_rule *rule = NULL; | |
4852 | struct hclge_dev *hdev = vport->back; | |
4853 | struct ethtool_rx_flow_spec *fs; | |
4854 | struct hlist_node *node2; | |
4855 | ||
4856 | if (!hnae3_dev_fd_supported(hdev)) | |
4857 | return -EOPNOTSUPP; | |
4858 | ||
4859 | fs = (struct ethtool_rx_flow_spec *)&cmd->fs; | |
4860 | ||
4861 | hlist_for_each_entry_safe(rule, node2, &hdev->fd_rule_list, rule_node) { | |
4862 | if (rule->location >= fs->location) | |
4863 | break; | |
4864 | } | |
4865 | ||
4866 | if (!rule || fs->location != rule->location) | |
4867 | return -ENOENT; | |
4868 | ||
4869 | fs->flow_type = rule->flow_type; | |
4870 | switch (fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) { | |
4871 | case SCTP_V4_FLOW: | |
4872 | case TCP_V4_FLOW: | |
4873 | case UDP_V4_FLOW: | |
4874 | fs->h_u.tcp_ip4_spec.ip4src = | |
4875 | cpu_to_be32(rule->tuples.src_ip[3]); | |
4876 | fs->m_u.tcp_ip4_spec.ip4src = | |
4877 | rule->unused_tuple & BIT(INNER_SRC_IP) ? | |
4878 | 0 : cpu_to_be32(rule->tuples_mask.src_ip[3]); | |
4879 | ||
4880 | fs->h_u.tcp_ip4_spec.ip4dst = | |
4881 | cpu_to_be32(rule->tuples.dst_ip[3]); | |
4882 | fs->m_u.tcp_ip4_spec.ip4dst = | |
4883 | rule->unused_tuple & BIT(INNER_DST_IP) ? | |
4884 | 0 : cpu_to_be32(rule->tuples_mask.dst_ip[3]); | |
4885 | ||
4886 | fs->h_u.tcp_ip4_spec.psrc = cpu_to_be16(rule->tuples.src_port); | |
4887 | fs->m_u.tcp_ip4_spec.psrc = | |
4888 | rule->unused_tuple & BIT(INNER_SRC_PORT) ? | |
4889 | 0 : cpu_to_be16(rule->tuples_mask.src_port); | |
4890 | ||
4891 | fs->h_u.tcp_ip4_spec.pdst = cpu_to_be16(rule->tuples.dst_port); | |
4892 | fs->m_u.tcp_ip4_spec.pdst = | |
4893 | rule->unused_tuple & BIT(INNER_DST_PORT) ? | |
4894 | 0 : cpu_to_be16(rule->tuples_mask.dst_port); | |
4895 | ||
4896 | fs->h_u.tcp_ip4_spec.tos = rule->tuples.ip_tos; | |
4897 | fs->m_u.tcp_ip4_spec.tos = | |
4898 | rule->unused_tuple & BIT(INNER_IP_TOS) ? | |
4899 | 0 : rule->tuples_mask.ip_tos; | |
4900 | ||
4901 | break; | |
4902 | case IP_USER_FLOW: | |
4903 | fs->h_u.usr_ip4_spec.ip4src = | |
4904 | cpu_to_be32(rule->tuples.src_ip[3]); | |
4905 | fs->m_u.tcp_ip4_spec.ip4src = | |
4906 | rule->unused_tuple & BIT(INNER_SRC_IP) ? | |
4907 | 0 : cpu_to_be32(rule->tuples_mask.src_ip[3]); | |
4908 | ||
4909 | fs->h_u.usr_ip4_spec.ip4dst = | |
4910 | cpu_to_be32(rule->tuples.dst_ip[3]); | |
4911 | fs->m_u.usr_ip4_spec.ip4dst = | |
4912 | rule->unused_tuple & BIT(INNER_DST_IP) ? | |
4913 | 0 : cpu_to_be32(rule->tuples_mask.dst_ip[3]); | |
4914 | ||
4915 | fs->h_u.usr_ip4_spec.tos = rule->tuples.ip_tos; | |
4916 | fs->m_u.usr_ip4_spec.tos = | |
4917 | rule->unused_tuple & BIT(INNER_IP_TOS) ? | |
4918 | 0 : rule->tuples_mask.ip_tos; | |
4919 | ||
4920 | fs->h_u.usr_ip4_spec.proto = rule->tuples.ip_proto; | |
4921 | fs->m_u.usr_ip4_spec.proto = | |
4922 | rule->unused_tuple & BIT(INNER_IP_PROTO) ? | |
4923 | 0 : rule->tuples_mask.ip_proto; | |
4924 | ||
4925 | fs->h_u.usr_ip4_spec.ip_ver = ETH_RX_NFC_IP4; | |
4926 | ||
4927 | break; | |
4928 | case SCTP_V6_FLOW: | |
4929 | case TCP_V6_FLOW: | |
4930 | case UDP_V6_FLOW: | |
4931 | cpu_to_be32_array(fs->h_u.tcp_ip6_spec.ip6src, | |
4932 | rule->tuples.src_ip, 4); | |
4933 | if (rule->unused_tuple & BIT(INNER_SRC_IP)) | |
4934 | memset(fs->m_u.tcp_ip6_spec.ip6src, 0, sizeof(int) * 4); | |
4935 | else | |
4936 | cpu_to_be32_array(fs->m_u.tcp_ip6_spec.ip6src, | |
4937 | rule->tuples_mask.src_ip, 4); | |
4938 | ||
4939 | cpu_to_be32_array(fs->h_u.tcp_ip6_spec.ip6dst, | |
4940 | rule->tuples.dst_ip, 4); | |
4941 | if (rule->unused_tuple & BIT(INNER_DST_IP)) | |
4942 | memset(fs->m_u.tcp_ip6_spec.ip6dst, 0, sizeof(int) * 4); | |
4943 | else | |
4944 | cpu_to_be32_array(fs->m_u.tcp_ip6_spec.ip6dst, | |
4945 | rule->tuples_mask.dst_ip, 4); | |
4946 | ||
4947 | fs->h_u.tcp_ip6_spec.psrc = cpu_to_be16(rule->tuples.src_port); | |
4948 | fs->m_u.tcp_ip6_spec.psrc = | |
4949 | rule->unused_tuple & BIT(INNER_SRC_PORT) ? | |
4950 | 0 : cpu_to_be16(rule->tuples_mask.src_port); | |
4951 | ||
4952 | fs->h_u.tcp_ip6_spec.pdst = cpu_to_be16(rule->tuples.dst_port); | |
4953 | fs->m_u.tcp_ip6_spec.pdst = | |
4954 | rule->unused_tuple & BIT(INNER_DST_PORT) ? | |
4955 | 0 : cpu_to_be16(rule->tuples_mask.dst_port); | |
4956 | ||
4957 | break; | |
4958 | case IPV6_USER_FLOW: | |
4959 | cpu_to_be32_array(fs->h_u.usr_ip6_spec.ip6src, | |
4960 | rule->tuples.src_ip, 4); | |
4961 | if (rule->unused_tuple & BIT(INNER_SRC_IP)) | |
4962 | memset(fs->m_u.usr_ip6_spec.ip6src, 0, sizeof(int) * 4); | |
4963 | else | |
4964 | cpu_to_be32_array(fs->m_u.usr_ip6_spec.ip6src, | |
4965 | rule->tuples_mask.src_ip, 4); | |
4966 | ||
4967 | cpu_to_be32_array(fs->h_u.usr_ip6_spec.ip6dst, | |
4968 | rule->tuples.dst_ip, 4); | |
4969 | if (rule->unused_tuple & BIT(INNER_DST_IP)) | |
4970 | memset(fs->m_u.usr_ip6_spec.ip6dst, 0, sizeof(int) * 4); | |
4971 | else | |
4972 | cpu_to_be32_array(fs->m_u.usr_ip6_spec.ip6dst, | |
4973 | rule->tuples_mask.dst_ip, 4); | |
4974 | ||
4975 | fs->h_u.usr_ip6_spec.l4_proto = rule->tuples.ip_proto; | |
4976 | fs->m_u.usr_ip6_spec.l4_proto = | |
4977 | rule->unused_tuple & BIT(INNER_IP_PROTO) ? | |
4978 | 0 : rule->tuples_mask.ip_proto; | |
4979 | ||
4980 | break; | |
4981 | case ETHER_FLOW: | |
4982 | ether_addr_copy(fs->h_u.ether_spec.h_source, | |
4983 | rule->tuples.src_mac); | |
4984 | if (rule->unused_tuple & BIT(INNER_SRC_MAC)) | |
4985 | eth_zero_addr(fs->m_u.ether_spec.h_source); | |
4986 | else | |
4987 | ether_addr_copy(fs->m_u.ether_spec.h_source, | |
4988 | rule->tuples_mask.src_mac); | |
4989 | ||
4990 | ether_addr_copy(fs->h_u.ether_spec.h_dest, | |
4991 | rule->tuples.dst_mac); | |
4992 | if (rule->unused_tuple & BIT(INNER_DST_MAC)) | |
4993 | eth_zero_addr(fs->m_u.ether_spec.h_dest); | |
4994 | else | |
4995 | ether_addr_copy(fs->m_u.ether_spec.h_dest, | |
4996 | rule->tuples_mask.dst_mac); | |
4997 | ||
4998 | fs->h_u.ether_spec.h_proto = | |
4999 | cpu_to_be16(rule->tuples.ether_proto); | |
5000 | fs->m_u.ether_spec.h_proto = | |
5001 | rule->unused_tuple & BIT(INNER_ETH_TYPE) ? | |
5002 | 0 : cpu_to_be16(rule->tuples_mask.ether_proto); | |
5003 | ||
5004 | break; | |
5005 | default: | |
5006 | return -EOPNOTSUPP; | |
5007 | } | |
5008 | ||
5009 | if (fs->flow_type & FLOW_EXT) { | |
5010 | fs->h_ext.vlan_tci = cpu_to_be16(rule->tuples.vlan_tag1); | |
5011 | fs->m_ext.vlan_tci = | |
5012 | rule->unused_tuple & BIT(INNER_VLAN_TAG_FST) ? | |
5013 | cpu_to_be16(VLAN_VID_MASK) : | |
5014 | cpu_to_be16(rule->tuples_mask.vlan_tag1); | |
5015 | } | |
5016 | ||
5017 | if (fs->flow_type & FLOW_MAC_EXT) { | |
5018 | ether_addr_copy(fs->h_ext.h_dest, rule->tuples.dst_mac); | |
5019 | if (rule->unused_tuple & BIT(INNER_DST_MAC)) | |
5020 | eth_zero_addr(fs->m_u.ether_spec.h_dest); | |
5021 | else | |
5022 | ether_addr_copy(fs->m_u.ether_spec.h_dest, | |
5023 | rule->tuples_mask.dst_mac); | |
5024 | } | |
5025 | ||
5026 | if (rule->action == HCLGE_FD_ACTION_DROP_PACKET) { | |
5027 | fs->ring_cookie = RX_CLS_FLOW_DISC; | |
5028 | } else { | |
5029 | u64 vf_id; | |
5030 | ||
5031 | fs->ring_cookie = rule->queue_id; | |
5032 | vf_id = rule->vf_id; | |
5033 | vf_id <<= ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF; | |
5034 | fs->ring_cookie |= vf_id; | |
5035 | } | |
5036 | ||
5037 | return 0; | |
5038 | } | |
5039 | ||
5040 | static int hclge_get_all_rules(struct hnae3_handle *handle, | |
5041 | struct ethtool_rxnfc *cmd, u32 *rule_locs) | |
5042 | { | |
5043 | struct hclge_vport *vport = hclge_get_vport(handle); | |
5044 | struct hclge_dev *hdev = vport->back; | |
5045 | struct hclge_fd_rule *rule; | |
5046 | struct hlist_node *node2; | |
5047 | int cnt = 0; | |
5048 | ||
5049 | if (!hnae3_dev_fd_supported(hdev)) | |
5050 | return -EOPNOTSUPP; | |
5051 | ||
5052 | cmd->data = hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]; | |
5053 | ||
5054 | hlist_for_each_entry_safe(rule, node2, | |
5055 | &hdev->fd_rule_list, rule_node) { | |
5056 | if (cnt == cmd->rule_cnt) | |
5057 | return -EMSGSIZE; | |
5058 | ||
5059 | rule_locs[cnt] = rule->location; | |
5060 | cnt++; | |
5061 | } | |
5062 | ||
5063 | cmd->rule_cnt = cnt; | |
5064 | ||
5065 | return 0; | |
5066 | } | |
5067 | ||
225c02eb HT |
5068 | static bool hclge_get_hw_reset_stat(struct hnae3_handle *handle) |
5069 | { | |
5070 | struct hclge_vport *vport = hclge_get_vport(handle); | |
5071 | struct hclge_dev *hdev = vport->back; | |
5072 | ||
5073 | return hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG) || | |
5074 | hclge_read_dev(&hdev->hw, HCLGE_FUN_RST_ING); | |
5075 | } | |
5076 | ||
5077 | static bool hclge_ae_dev_resetting(struct hnae3_handle *handle) | |
5078 | { | |
5079 | struct hclge_vport *vport = hclge_get_vport(handle); | |
5080 | struct hclge_dev *hdev = vport->back; | |
5081 | ||
5082 | return test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state); | |
5083 | } | |
5084 | ||
5085 | static unsigned long hclge_ae_dev_reset_cnt(struct hnae3_handle *handle) | |
5086 | { | |
5087 | struct hclge_vport *vport = hclge_get_vport(handle); | |
5088 | struct hclge_dev *hdev = vport->back; | |
5089 | ||
5090 | return hdev->reset_count; | |
5091 | } | |
5092 | ||
d1f04a80 JS |
5093 | static void hclge_enable_fd(struct hnae3_handle *handle, bool enable) |
5094 | { | |
5095 | struct hclge_vport *vport = hclge_get_vport(handle); | |
5096 | struct hclge_dev *hdev = vport->back; | |
5097 | ||
5098 | hdev->fd_cfg.fd_en = enable; | |
5099 | if (!enable) | |
5100 | hclge_del_all_fd_entries(handle, false); | |
5101 | else | |
5102 | hclge_restore_fd_entries(handle); | |
5103 | } | |
5104 | ||
46a3df9f S |
5105 | static void hclge_cfg_mac_mode(struct hclge_dev *hdev, bool enable) |
5106 | { | |
5107 | struct hclge_desc desc; | |
d44f9b63 YL |
5108 | struct hclge_config_mac_mode_cmd *req = |
5109 | (struct hclge_config_mac_mode_cmd *)desc.data; | |
a90bb9a5 | 5110 | u32 loop_en = 0; |
46a3df9f S |
5111 | int ret; |
5112 | ||
5113 | hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, false); | |
ccc23ef3 PL |
5114 | hnae3_set_bit(loop_en, HCLGE_MAC_TX_EN_B, enable); |
5115 | hnae3_set_bit(loop_en, HCLGE_MAC_RX_EN_B, enable); | |
5116 | hnae3_set_bit(loop_en, HCLGE_MAC_PAD_TX_B, enable); | |
5117 | hnae3_set_bit(loop_en, HCLGE_MAC_PAD_RX_B, enable); | |
5118 | hnae3_set_bit(loop_en, HCLGE_MAC_1588_TX_B, 0); | |
5119 | hnae3_set_bit(loop_en, HCLGE_MAC_1588_RX_B, 0); | |
5120 | hnae3_set_bit(loop_en, HCLGE_MAC_APP_LP_B, 0); | |
5121 | hnae3_set_bit(loop_en, HCLGE_MAC_LINE_LP_B, 0); | |
5122 | hnae3_set_bit(loop_en, HCLGE_MAC_FCS_TX_B, enable); | |
5123 | hnae3_set_bit(loop_en, HCLGE_MAC_RX_FCS_B, enable); | |
5124 | hnae3_set_bit(loop_en, HCLGE_MAC_RX_FCS_STRIP_B, enable); | |
5125 | hnae3_set_bit(loop_en, HCLGE_MAC_TX_OVERSIZE_TRUNCATE_B, enable); | |
5126 | hnae3_set_bit(loop_en, HCLGE_MAC_RX_OVERSIZE_TRUNCATE_B, enable); | |
5127 | hnae3_set_bit(loop_en, HCLGE_MAC_TX_UNDER_MIN_ERR_B, enable); | |
a90bb9a5 | 5128 | req->txrx_pad_fcs_loop_en = cpu_to_le32(loop_en); |
46a3df9f S |
5129 | |
5130 | ret = hclge_cmd_send(&hdev->hw, &desc, 1); | |
5131 | if (ret) | |
5132 | dev_err(&hdev->pdev->dev, | |
5133 | "mac enable fail, ret =%d.\n", ret); | |
5134 | } | |
5135 | ||
67b8c316 | 5136 | static int hclge_set_app_loopback(struct hclge_dev *hdev, bool en) |
c39c4d98 | 5137 | { |
c39c4d98 | 5138 | struct hclge_config_mac_mode_cmd *req; |
c39c4d98 YL |
5139 | struct hclge_desc desc; |
5140 | u32 loop_en; | |
5141 | int ret; | |
5142 | ||
e67d9ce9 YL |
5143 | req = (struct hclge_config_mac_mode_cmd *)&desc.data[0]; |
5144 | /* 1 Read out the MAC mode config at first */ | |
5145 | hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, true); | |
5146 | ret = hclge_cmd_send(&hdev->hw, &desc, 1); | |
5147 | if (ret) { | |
5148 | dev_err(&hdev->pdev->dev, | |
5149 | "mac loopback get fail, ret =%d.\n", ret); | |
5150 | return ret; | |
5151 | } | |
c39c4d98 | 5152 | |
e67d9ce9 YL |
5153 | /* 2 Then setup the loopback flag */ |
5154 | loop_en = le32_to_cpu(req->txrx_pad_fcs_loop_en); | |
ccc23ef3 | 5155 | hnae3_set_bit(loop_en, HCLGE_MAC_APP_LP_B, en ? 1 : 0); |
3ebc5e0b YL |
5156 | hnae3_set_bit(loop_en, HCLGE_MAC_TX_EN_B, en ? 1 : 0); |
5157 | hnae3_set_bit(loop_en, HCLGE_MAC_RX_EN_B, en ? 1 : 0); | |
e67d9ce9 YL |
5158 | |
5159 | req->txrx_pad_fcs_loop_en = cpu_to_le32(loop_en); | |
c39c4d98 | 5160 | |
e67d9ce9 YL |
5161 | /* 3 Config mac work mode with loopback flag |
5162 | * and its original configure parameters | |
5163 | */ | |
5164 | hclge_cmd_reuse_desc(&desc, false); | |
5165 | ret = hclge_cmd_send(&hdev->hw, &desc, 1); | |
5166 | if (ret) | |
5167 | dev_err(&hdev->pdev->dev, | |
5168 | "mac loopback set fail, ret =%d.\n", ret); | |
5169 | return ret; | |
5170 | } | |
c39c4d98 | 5171 | |
86957272 FL |
5172 | static int hclge_set_serdes_loopback(struct hclge_dev *hdev, bool en, |
5173 | enum hnae3_loop loop_mode) | |
e006bb00 PL |
5174 | { |
5175 | #define HCLGE_SERDES_RETRY_MS 10 | |
5176 | #define HCLGE_SERDES_RETRY_NUM 100 | |
5177 | struct hclge_serdes_lb_cmd *req; | |
5178 | struct hclge_desc desc; | |
5179 | int ret, i = 0; | |
86957272 | 5180 | u8 loop_mode_b; |
e006bb00 | 5181 | |
855f03fb | 5182 | req = (struct hclge_serdes_lb_cmd *)desc.data; |
e006bb00 PL |
5183 | hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SERDES_LOOPBACK, false); |
5184 | ||
86957272 FL |
5185 | switch (loop_mode) { |
5186 | case HNAE3_LOOP_SERIAL_SERDES: | |
5187 | loop_mode_b = HCLGE_CMD_SERDES_SERIAL_INNER_LOOP_B; | |
5188 | break; | |
5189 | case HNAE3_LOOP_PARALLEL_SERDES: | |
5190 | loop_mode_b = HCLGE_CMD_SERDES_PARALLEL_INNER_LOOP_B; | |
5191 | break; | |
5192 | default: | |
5193 | dev_err(&hdev->pdev->dev, | |
5194 | "unsupported serdes loopback mode %d\n", loop_mode); | |
5195 | return -ENOTSUPP; | |
5196 | } | |
5197 | ||
e006bb00 | 5198 | if (en) { |
86957272 FL |
5199 | req->enable = loop_mode_b; |
5200 | req->mask = loop_mode_b; | |
e006bb00 | 5201 | } else { |
86957272 | 5202 | req->mask = loop_mode_b; |
e006bb00 PL |
5203 | } |
5204 | ||
5205 | ret = hclge_cmd_send(&hdev->hw, &desc, 1); | |
5206 | if (ret) { | |
5207 | dev_err(&hdev->pdev->dev, | |
5208 | "serdes loopback set fail, ret = %d\n", ret); | |
5209 | return ret; | |
5210 | } | |
5211 | ||
5212 | do { | |
5213 | msleep(HCLGE_SERDES_RETRY_MS); | |
5214 | hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SERDES_LOOPBACK, | |
5215 | true); | |
5216 | ret = hclge_cmd_send(&hdev->hw, &desc, 1); | |
5217 | if (ret) { | |
5218 | dev_err(&hdev->pdev->dev, | |
5219 | "serdes loopback get, ret = %d\n", ret); | |
5220 | return ret; | |
5221 | } | |
5222 | } while (++i < HCLGE_SERDES_RETRY_NUM && | |
5223 | !(req->result & HCLGE_CMD_SERDES_DONE_B)); | |
5224 | ||
5225 | if (!(req->result & HCLGE_CMD_SERDES_DONE_B)) { | |
5226 | dev_err(&hdev->pdev->dev, "serdes loopback set timeout\n"); | |
5227 | return -EBUSY; | |
5228 | } else if (!(req->result & HCLGE_CMD_SERDES_SUCCESS_B)) { | |
5229 | dev_err(&hdev->pdev->dev, "serdes loopback set failed in fw\n"); | |
5230 | return -EIO; | |
5231 | } | |
5232 | ||
3ebc5e0b | 5233 | hclge_cfg_mac_mode(hdev, en); |
e006bb00 PL |
5234 | return 0; |
5235 | } | |
5236 | ||
3ebc5e0b YL |
5237 | static int hclge_tqp_enable(struct hclge_dev *hdev, int tqp_id, |
5238 | int stream_id, bool enable) | |
5239 | { | |
5240 | struct hclge_desc desc; | |
5241 | struct hclge_cfg_com_tqp_queue_cmd *req = | |
5242 | (struct hclge_cfg_com_tqp_queue_cmd *)desc.data; | |
5243 | int ret; | |
5244 | ||
5245 | hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_COM_TQP_QUEUE, false); | |
5246 | req->tqp_id = cpu_to_le16(tqp_id & HCLGE_RING_ID_MASK); | |
5247 | req->stream_id = cpu_to_le16(stream_id); | |
5248 | req->enable |= enable << HCLGE_TQP_ENABLE_B; | |
5249 | ||
5250 | ret = hclge_cmd_send(&hdev->hw, &desc, 1); | |
5251 | if (ret) | |
5252 | dev_err(&hdev->pdev->dev, | |
5253 | "Tqp enable fail, status =%d.\n", ret); | |
5254 | return ret; | |
5255 | } | |
5256 | ||
e67d9ce9 YL |
5257 | static int hclge_set_loopback(struct hnae3_handle *handle, |
5258 | enum hnae3_loop loop_mode, bool en) | |
5259 | { | |
5260 | struct hclge_vport *vport = hclge_get_vport(handle); | |
5261 | struct hclge_dev *hdev = vport->back; | |
3ebc5e0b | 5262 | int i, ret; |
e67d9ce9 YL |
5263 | |
5264 | switch (loop_mode) { | |
67b8c316 FL |
5265 | case HNAE3_LOOP_APP: |
5266 | ret = hclge_set_app_loopback(hdev, en); | |
c39c4d98 | 5267 | break; |
86957272 FL |
5268 | case HNAE3_LOOP_SERIAL_SERDES: |
5269 | case HNAE3_LOOP_PARALLEL_SERDES: | |
5270 | ret = hclge_set_serdes_loopback(hdev, en, loop_mode); | |
e006bb00 | 5271 | break; |
c39c4d98 YL |
5272 | default: |
5273 | ret = -ENOTSUPP; | |
5274 | dev_err(&hdev->pdev->dev, | |
5275 | "loop_mode %d is not supported\n", loop_mode); | |
5276 | break; | |
5277 | } | |
5278 | ||
3ebc5e0b YL |
5279 | for (i = 0; i < vport->alloc_tqps; i++) { |
5280 | ret = hclge_tqp_enable(hdev, i, 0, en); | |
5281 | if (ret) | |
5282 | return ret; | |
5283 | } | |
46a3df9f | 5284 | |
3ebc5e0b | 5285 | return 0; |
46a3df9f S |
5286 | } |
5287 | ||
5288 | static void hclge_reset_tqp_stats(struct hnae3_handle *handle) | |
5289 | { | |
5290 | struct hclge_vport *vport = hclge_get_vport(handle); | |
5291 | struct hnae3_queue *queue; | |
5292 | struct hclge_tqp *tqp; | |
5293 | int i; | |
5294 | ||
5295 | for (i = 0; i < vport->alloc_tqps; i++) { | |
5296 | queue = handle->kinfo.tqp[i]; | |
5297 | tqp = container_of(queue, struct hclge_tqp, q); | |
5298 | memset(&tqp->tqp_stats, 0, sizeof(tqp->tqp_stats)); | |
5299 | } | |
5300 | } | |
5301 | ||
fad0e9d8 JS |
5302 | static void hclge_set_timer_task(struct hnae3_handle *handle, bool enable) |
5303 | { | |
5304 | struct hclge_vport *vport = hclge_get_vport(handle); | |
5305 | struct hclge_dev *hdev = vport->back; | |
5306 | ||
5307 | if (enable) { | |
5308 | mod_timer(&hdev->service_timer, jiffies + HZ); | |
5309 | } else { | |
5310 | del_timer_sync(&hdev->service_timer); | |
5311 | cancel_work_sync(&hdev->service_task); | |
5312 | clear_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state); | |
5313 | } | |
5314 | } | |
5315 | ||
46a3df9f S |
5316 | static int hclge_ae_start(struct hnae3_handle *handle) |
5317 | { | |
5318 | struct hclge_vport *vport = hclge_get_vport(handle); | |
5319 | struct hclge_dev *hdev = vport->back; | |
46a3df9f | 5320 | |
46a3df9f S |
5321 | /* mac enable */ |
5322 | hclge_cfg_mac_mode(hdev, true); | |
5323 | clear_bit(HCLGE_STATE_DOWN, &hdev->state); | |
3ae84019 | 5324 | hdev->hw.mac.link = 0; |
46a3df9f | 5325 | |
f9637cc2 PL |
5326 | /* reset tqp stats */ |
5327 | hclge_reset_tqp_stats(handle); | |
5328 | ||
dda6b7d5 | 5329 | hclge_mac_start_phy(hdev); |
46a3df9f | 5330 | |
46a3df9f S |
5331 | return 0; |
5332 | } | |
5333 | ||
5334 | static void hclge_ae_stop(struct hnae3_handle *handle) | |
5335 | { | |
5336 | struct hclge_vport *vport = hclge_get_vport(handle); | |
5337 | struct hclge_dev *hdev = vport->back; | |
94cce1d5 | 5338 | int i; |
46a3df9f | 5339 | |
4ee3e5a8 FL |
5340 | set_bit(HCLGE_STATE_DOWN, &hdev->state); |
5341 | ||
48ac80db HT |
5342 | /* If it is not PF reset, the firmware will disable the MAC, |
5343 | * so it only need to stop phy here. | |
5344 | */ | |
5345 | if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) && | |
5346 | hdev->reset_type != HNAE3_FUNC_RESET) { | |
4486f5c9 | 5347 | hclge_mac_stop_phy(hdev); |
f9637cc2 | 5348 | return; |
4486f5c9 | 5349 | } |
f9637cc2 | 5350 | |
94cce1d5 HT |
5351 | for (i = 0; i < handle->kinfo.num_tqps; i++) |
5352 | hclge_reset_tqp(handle, i); | |
5353 | ||
46a3df9f S |
5354 | /* Mac disable */ |
5355 | hclge_cfg_mac_mode(hdev, false); | |
5356 | ||
5357 | hclge_mac_stop_phy(hdev); | |
5358 | ||
5359 | /* reset tqp stats */ | |
5360 | hclge_reset_tqp_stats(handle); | |
b91fb71c | 5361 | hclge_update_link_status(hdev); |
46a3df9f S |
5362 | } |
5363 | ||
337460de YL |
5364 | int hclge_vport_start(struct hclge_vport *vport) |
5365 | { | |
5366 | set_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state); | |
5367 | vport->last_active_jiffies = jiffies; | |
5368 | return 0; | |
5369 | } | |
5370 | ||
5371 | void hclge_vport_stop(struct hclge_vport *vport) | |
5372 | { | |
5373 | clear_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state); | |
5374 | } | |
5375 | ||
5376 | static int hclge_client_start(struct hnae3_handle *handle) | |
5377 | { | |
5378 | struct hclge_vport *vport = hclge_get_vport(handle); | |
5379 | ||
5380 | return hclge_vport_start(vport); | |
5381 | } | |
5382 | ||
5383 | static void hclge_client_stop(struct hnae3_handle *handle) | |
5384 | { | |
5385 | struct hclge_vport *vport = hclge_get_vport(handle); | |
5386 | ||
5387 | hclge_vport_stop(vport); | |
5388 | } | |
5389 | ||
46a3df9f S |
5390 | static int hclge_get_mac_vlan_cmd_status(struct hclge_vport *vport, |
5391 | u16 cmdq_resp, u8 resp_code, | |
5392 | enum hclge_mac_vlan_tbl_opcode op) | |
5393 | { | |
5394 | struct hclge_dev *hdev = vport->back; | |
5395 | int return_status = -EIO; | |
5396 | ||
5397 | if (cmdq_resp) { | |
5398 | dev_err(&hdev->pdev->dev, | |
5399 | "cmdq execute failed for get_mac_vlan_cmd_status,status=%d.\n", | |
5400 | cmdq_resp); | |
5401 | return -EIO; | |
5402 | } | |
5403 | ||
5404 | if (op == HCLGE_MAC_VLAN_ADD) { | |
5405 | if ((!resp_code) || (resp_code == 1)) { | |
5406 | return_status = 0; | |
5407 | } else if (resp_code == 2) { | |
2f894c5b | 5408 | return_status = -ENOSPC; |
46a3df9f S |
5409 | dev_err(&hdev->pdev->dev, |
5410 | "add mac addr failed for uc_overflow.\n"); | |
5411 | } else if (resp_code == 3) { | |
2f894c5b | 5412 | return_status = -ENOSPC; |
46a3df9f S |
5413 | dev_err(&hdev->pdev->dev, |
5414 | "add mac addr failed for mc_overflow.\n"); | |
5415 | } else { | |
5416 | dev_err(&hdev->pdev->dev, | |
5417 | "add mac addr failed for undefined, code=%d.\n", | |
5418 | resp_code); | |
5419 | } | |
5420 | } else if (op == HCLGE_MAC_VLAN_REMOVE) { | |
5421 | if (!resp_code) { | |
5422 | return_status = 0; | |
5423 | } else if (resp_code == 1) { | |
2f894c5b | 5424 | return_status = -ENOENT; |
46a3df9f S |
5425 | dev_dbg(&hdev->pdev->dev, |
5426 | "remove mac addr failed for miss.\n"); | |
5427 | } else { | |
5428 | dev_err(&hdev->pdev->dev, | |
5429 | "remove mac addr failed for undefined, code=%d.\n", | |
5430 | resp_code); | |
5431 | } | |
5432 | } else if (op == HCLGE_MAC_VLAN_LKUP) { | |
5433 | if (!resp_code) { | |
5434 | return_status = 0; | |
5435 | } else if (resp_code == 1) { | |
2f894c5b | 5436 | return_status = -ENOENT; |
46a3df9f S |
5437 | dev_dbg(&hdev->pdev->dev, |
5438 | "lookup mac addr failed for miss.\n"); | |
5439 | } else { | |
5440 | dev_err(&hdev->pdev->dev, | |
5441 | "lookup mac addr failed for undefined, code=%d.\n", | |
5442 | resp_code); | |
5443 | } | |
5444 | } else { | |
2f894c5b | 5445 | return_status = -EINVAL; |
46a3df9f S |
5446 | dev_err(&hdev->pdev->dev, |
5447 | "unknown opcode for get_mac_vlan_cmd_status,opcode=%d.\n", | |
5448 | op); | |
5449 | } | |
5450 | ||
5451 | return return_status; | |
5452 | } | |
5453 | ||
5454 | static int hclge_update_desc_vfid(struct hclge_desc *desc, int vfid, bool clr) | |
5455 | { | |
5456 | int word_num; | |
5457 | int bit_num; | |
5458 | ||
5459 | if (vfid > 255 || vfid < 0) | |
5460 | return -EIO; | |
5461 | ||
5462 | if (vfid >= 0 && vfid <= 191) { | |
5463 | word_num = vfid / 32; | |
5464 | bit_num = vfid % 32; | |
5465 | if (clr) | |
a90bb9a5 | 5466 | desc[1].data[word_num] &= cpu_to_le32(~(1 << bit_num)); |
46a3df9f | 5467 | else |
a90bb9a5 | 5468 | desc[1].data[word_num] |= cpu_to_le32(1 << bit_num); |
46a3df9f S |
5469 | } else { |
5470 | word_num = (vfid - 192) / 32; | |
5471 | bit_num = vfid % 32; | |
5472 | if (clr) | |
a90bb9a5 | 5473 | desc[2].data[word_num] &= cpu_to_le32(~(1 << bit_num)); |
46a3df9f | 5474 | else |
a90bb9a5 | 5475 | desc[2].data[word_num] |= cpu_to_le32(1 << bit_num); |
46a3df9f S |
5476 | } |
5477 | ||
5478 | return 0; | |
5479 | } | |
5480 | ||
5481 | static bool hclge_is_all_function_id_zero(struct hclge_desc *desc) | |
5482 | { | |
5483 | #define HCLGE_DESC_NUMBER 3 | |
5484 | #define HCLGE_FUNC_NUMBER_PER_DESC 6 | |
5485 | int i, j; | |
5486 | ||
5487 | for (i = 0; i < HCLGE_DESC_NUMBER; i++) | |
5488 | for (j = 0; j < HCLGE_FUNC_NUMBER_PER_DESC; j++) | |
5489 | if (desc[i].data[j]) | |
5490 | return false; | |
5491 | ||
5492 | return true; | |
5493 | } | |
5494 | ||
d44f9b63 | 5495 | static void hclge_prepare_mac_addr(struct hclge_mac_vlan_tbl_entry_cmd *new_req, |
46a3df9f S |
5496 | const u8 *addr) |
5497 | { | |
5498 | const unsigned char *mac_addr = addr; | |
5499 | u32 high_val = mac_addr[2] << 16 | (mac_addr[3] << 24) | | |
5500 | (mac_addr[0]) | (mac_addr[1] << 8); | |
5501 | u32 low_val = mac_addr[4] | (mac_addr[5] << 8); | |
5502 | ||
5503 | new_req->mac_addr_hi32 = cpu_to_le32(high_val); | |
5504 | new_req->mac_addr_lo16 = cpu_to_le16(low_val & 0xffff); | |
5505 | } | |
5506 | ||
46a3df9f | 5507 | static int hclge_remove_mac_vlan_tbl(struct hclge_vport *vport, |
d44f9b63 | 5508 | struct hclge_mac_vlan_tbl_entry_cmd *req) |
46a3df9f S |
5509 | { |
5510 | struct hclge_dev *hdev = vport->back; | |
5511 | struct hclge_desc desc; | |
5512 | u8 resp_code; | |
a90bb9a5 | 5513 | u16 retval; |
46a3df9f S |
5514 | int ret; |
5515 | ||
5516 | hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_REMOVE, false); | |
5517 | ||
d44f9b63 | 5518 | memcpy(desc.data, req, sizeof(struct hclge_mac_vlan_tbl_entry_cmd)); |
46a3df9f S |
5519 | |
5520 | ret = hclge_cmd_send(&hdev->hw, &desc, 1); | |
5521 | if (ret) { | |
5522 | dev_err(&hdev->pdev->dev, | |
5523 | "del mac addr failed for cmd_send, ret =%d.\n", | |
5524 | ret); | |
5525 | return ret; | |
5526 | } | |
a90bb9a5 YL |
5527 | resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff; |
5528 | retval = le16_to_cpu(desc.retval); | |
46a3df9f | 5529 | |
a90bb9a5 | 5530 | return hclge_get_mac_vlan_cmd_status(vport, retval, resp_code, |
46a3df9f S |
5531 | HCLGE_MAC_VLAN_REMOVE); |
5532 | } | |
5533 | ||
5534 | static int hclge_lookup_mac_vlan_tbl(struct hclge_vport *vport, | |
d44f9b63 | 5535 | struct hclge_mac_vlan_tbl_entry_cmd *req, |
46a3df9f S |
5536 | struct hclge_desc *desc, |
5537 | bool is_mc) | |
5538 | { | |
5539 | struct hclge_dev *hdev = vport->back; | |
5540 | u8 resp_code; | |
a90bb9a5 | 5541 | u16 retval; |
46a3df9f S |
5542 | int ret; |
5543 | ||
5544 | hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_MAC_VLAN_ADD, true); | |
5545 | if (is_mc) { | |
5546 | desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT); | |
5547 | memcpy(desc[0].data, | |
5548 | req, | |
d44f9b63 | 5549 | sizeof(struct hclge_mac_vlan_tbl_entry_cmd)); |
46a3df9f S |
5550 | hclge_cmd_setup_basic_desc(&desc[1], |
5551 | HCLGE_OPC_MAC_VLAN_ADD, | |
5552 | true); | |
5553 | desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT); | |
5554 | hclge_cmd_setup_basic_desc(&desc[2], | |
5555 | HCLGE_OPC_MAC_VLAN_ADD, | |
5556 | true); | |
5557 | ret = hclge_cmd_send(&hdev->hw, desc, 3); | |
5558 | } else { | |
5559 | memcpy(desc[0].data, | |
5560 | req, | |
d44f9b63 | 5561 | sizeof(struct hclge_mac_vlan_tbl_entry_cmd)); |
46a3df9f S |
5562 | ret = hclge_cmd_send(&hdev->hw, desc, 1); |
5563 | } | |
5564 | if (ret) { | |
5565 | dev_err(&hdev->pdev->dev, | |
5566 | "lookup mac addr failed for cmd_send, ret =%d.\n", | |
5567 | ret); | |
5568 | return ret; | |
5569 | } | |
a90bb9a5 YL |
5570 | resp_code = (le32_to_cpu(desc[0].data[0]) >> 8) & 0xff; |
5571 | retval = le16_to_cpu(desc[0].retval); | |
46a3df9f | 5572 | |
a90bb9a5 | 5573 | return hclge_get_mac_vlan_cmd_status(vport, retval, resp_code, |
46a3df9f S |
5574 | HCLGE_MAC_VLAN_LKUP); |
5575 | } | |
5576 | ||
5577 | static int hclge_add_mac_vlan_tbl(struct hclge_vport *vport, | |
d44f9b63 | 5578 | struct hclge_mac_vlan_tbl_entry_cmd *req, |
46a3df9f S |
5579 | struct hclge_desc *mc_desc) |
5580 | { | |
5581 | struct hclge_dev *hdev = vport->back; | |
5582 | int cfg_status; | |
5583 | u8 resp_code; | |
a90bb9a5 | 5584 | u16 retval; |
46a3df9f S |
5585 | int ret; |
5586 | ||
5587 | if (!mc_desc) { | |
5588 | struct hclge_desc desc; | |
5589 | ||
5590 | hclge_cmd_setup_basic_desc(&desc, | |
5591 | HCLGE_OPC_MAC_VLAN_ADD, | |
5592 | false); | |
d44f9b63 YL |
5593 | memcpy(desc.data, req, |
5594 | sizeof(struct hclge_mac_vlan_tbl_entry_cmd)); | |
46a3df9f | 5595 | ret = hclge_cmd_send(&hdev->hw, &desc, 1); |
a90bb9a5 YL |
5596 | resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff; |
5597 | retval = le16_to_cpu(desc.retval); | |
5598 | ||
5599 | cfg_status = hclge_get_mac_vlan_cmd_status(vport, retval, | |
46a3df9f S |
5600 | resp_code, |
5601 | HCLGE_MAC_VLAN_ADD); | |
5602 | } else { | |
c3b6f755 | 5603 | hclge_cmd_reuse_desc(&mc_desc[0], false); |
46a3df9f | 5604 | mc_desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT); |
c3b6f755 | 5605 | hclge_cmd_reuse_desc(&mc_desc[1], false); |
46a3df9f | 5606 | mc_desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT); |
c3b6f755 | 5607 | hclge_cmd_reuse_desc(&mc_desc[2], false); |
46a3df9f S |
5608 | mc_desc[2].flag &= cpu_to_le16(~HCLGE_CMD_FLAG_NEXT); |
5609 | memcpy(mc_desc[0].data, req, | |
d44f9b63 | 5610 | sizeof(struct hclge_mac_vlan_tbl_entry_cmd)); |
46a3df9f | 5611 | ret = hclge_cmd_send(&hdev->hw, mc_desc, 3); |
a90bb9a5 YL |
5612 | resp_code = (le32_to_cpu(mc_desc[0].data[0]) >> 8) & 0xff; |
5613 | retval = le16_to_cpu(mc_desc[0].retval); | |
5614 | ||
5615 | cfg_status = hclge_get_mac_vlan_cmd_status(vport, retval, | |
46a3df9f S |
5616 | resp_code, |
5617 | HCLGE_MAC_VLAN_ADD); | |
5618 | } | |
5619 | ||
5620 | if (ret) { | |
5621 | dev_err(&hdev->pdev->dev, | |
5622 | "add mac addr failed for cmd_send, ret =%d.\n", | |
5623 | ret); | |
5624 | return ret; | |
5625 | } | |
5626 | ||
5627 | return cfg_status; | |
5628 | } | |
5629 | ||
2da5ec58 JS |
5630 | static int hclge_init_umv_space(struct hclge_dev *hdev) |
5631 | { | |
5632 | u16 allocated_size = 0; | |
5633 | int ret; | |
5634 | ||
5635 | ret = hclge_set_umv_space(hdev, hdev->wanted_umv_size, &allocated_size, | |
5636 | true); | |
5637 | if (ret) | |
5638 | return ret; | |
5639 | ||
5640 | if (allocated_size < hdev->wanted_umv_size) | |
5641 | dev_warn(&hdev->pdev->dev, | |
5642 | "Alloc umv space failed, want %d, get %d\n", | |
5643 | hdev->wanted_umv_size, allocated_size); | |
5644 | ||
5645 | mutex_init(&hdev->umv_mutex); | |
5646 | hdev->max_umv_size = allocated_size; | |
5647 | hdev->priv_umv_size = hdev->max_umv_size / (hdev->num_req_vfs + 2); | |
5648 | hdev->share_umv_size = hdev->priv_umv_size + | |
5649 | hdev->max_umv_size % (hdev->num_req_vfs + 2); | |
5650 | ||
5651 | return 0; | |
5652 | } | |
5653 | ||
5654 | static int hclge_uninit_umv_space(struct hclge_dev *hdev) | |
5655 | { | |
5656 | int ret; | |
5657 | ||
5658 | if (hdev->max_umv_size > 0) { | |
5659 | ret = hclge_set_umv_space(hdev, hdev->max_umv_size, NULL, | |
5660 | false); | |
5661 | if (ret) | |
5662 | return ret; | |
5663 | hdev->max_umv_size = 0; | |
5664 | } | |
5665 | mutex_destroy(&hdev->umv_mutex); | |
5666 | ||
5667 | return 0; | |
5668 | } | |
5669 | ||
5670 | static int hclge_set_umv_space(struct hclge_dev *hdev, u16 space_size, | |
5671 | u16 *allocated_size, bool is_alloc) | |
5672 | { | |
5673 | struct hclge_umv_spc_alc_cmd *req; | |
5674 | struct hclge_desc desc; | |
5675 | int ret; | |
5676 | ||
5677 | req = (struct hclge_umv_spc_alc_cmd *)desc.data; | |
5678 | hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_ALLOCATE, false); | |
5679 | hnae3_set_bit(req->allocate, HCLGE_UMV_SPC_ALC_B, !is_alloc); | |
5680 | req->space_size = cpu_to_le32(space_size); | |
5681 | ||
5682 | ret = hclge_cmd_send(&hdev->hw, &desc, 1); | |
5683 | if (ret) { | |
5684 | dev_err(&hdev->pdev->dev, | |
5685 | "%s umv space failed for cmd_send, ret =%d\n", | |
5686 | is_alloc ? "allocate" : "free", ret); | |
5687 | return ret; | |
5688 | } | |
5689 | ||
5690 | if (is_alloc && allocated_size) | |
5691 | *allocated_size = le32_to_cpu(desc.data[1]); | |
5692 | ||
5693 | return 0; | |
5694 | } | |
5695 | ||
5696 | static void hclge_reset_umv_space(struct hclge_dev *hdev) | |
5697 | { | |
5698 | struct hclge_vport *vport; | |
5699 | int i; | |
5700 | ||
5701 | for (i = 0; i < hdev->num_alloc_vport; i++) { | |
5702 | vport = &hdev->vport[i]; | |
5703 | vport->used_umv_num = 0; | |
5704 | } | |
5705 | ||
5706 | mutex_lock(&hdev->umv_mutex); | |
5707 | hdev->share_umv_size = hdev->priv_umv_size + | |
5708 | hdev->max_umv_size % (hdev->num_req_vfs + 2); | |
5709 | mutex_unlock(&hdev->umv_mutex); | |
5710 | } | |
5711 | ||
5712 | static bool hclge_is_umv_space_full(struct hclge_vport *vport) | |
5713 | { | |
5714 | struct hclge_dev *hdev = vport->back; | |
5715 | bool is_full; | |
5716 | ||
5717 | mutex_lock(&hdev->umv_mutex); | |
5718 | is_full = (vport->used_umv_num >= hdev->priv_umv_size && | |
5719 | hdev->share_umv_size == 0); | |
5720 | mutex_unlock(&hdev->umv_mutex); | |
5721 | ||
5722 | return is_full; | |
5723 | } | |
5724 | ||
5725 | static void hclge_update_umv_space(struct hclge_vport *vport, bool is_free) | |
5726 | { | |
5727 | struct hclge_dev *hdev = vport->back; | |
5728 | ||
5729 | mutex_lock(&hdev->umv_mutex); | |
5730 | if (is_free) { | |
5731 | if (vport->used_umv_num > hdev->priv_umv_size) | |
5732 | hdev->share_umv_size++; | |
5733 | vport->used_umv_num--; | |
5734 | } else { | |
5735 | if (vport->used_umv_num >= hdev->priv_umv_size) | |
5736 | hdev->share_umv_size--; | |
5737 | vport->used_umv_num++; | |
5738 | } | |
5739 | mutex_unlock(&hdev->umv_mutex); | |
5740 | } | |
5741 | ||
46a3df9f S |
5742 | static int hclge_add_uc_addr(struct hnae3_handle *handle, |
5743 | const unsigned char *addr) | |
5744 | { | |
5745 | struct hclge_vport *vport = hclge_get_vport(handle); | |
5746 | ||
5747 | return hclge_add_uc_addr_common(vport, addr); | |
5748 | } | |
5749 | ||
5750 | int hclge_add_uc_addr_common(struct hclge_vport *vport, | |
5751 | const unsigned char *addr) | |
5752 | { | |
5753 | struct hclge_dev *hdev = vport->back; | |
d44f9b63 | 5754 | struct hclge_mac_vlan_tbl_entry_cmd req; |
bf88f41f | 5755 | struct hclge_desc desc; |
a90bb9a5 | 5756 | u16 egress_port = 0; |
04f0c72a | 5757 | int ret; |
46a3df9f S |
5758 | |
5759 | /* mac addr check */ | |
5760 | if (is_zero_ether_addr(addr) || | |
5761 | is_broadcast_ether_addr(addr) || | |
5762 | is_multicast_ether_addr(addr)) { | |
5763 | dev_err(&hdev->pdev->dev, | |
5764 | "Set_uc mac err! invalid mac:%pM. is_zero:%d,is_br=%d,is_mul=%d\n", | |
5765 | addr, | |
5766 | is_zero_ether_addr(addr), | |
5767 | is_broadcast_ether_addr(addr), | |
5768 | is_multicast_ether_addr(addr)); | |
5769 | return -EINVAL; | |
5770 | } | |
5771 | ||
5772 | memset(&req, 0, sizeof(req)); | |
ccc23ef3 | 5773 | hnae3_set_bit(req.flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1); |
a90bb9a5 | 5774 | |
ccc23ef3 PL |
5775 | hnae3_set_field(egress_port, HCLGE_MAC_EPORT_VFID_M, |
5776 | HCLGE_MAC_EPORT_VFID_S, vport->vport_id); | |
a90bb9a5 YL |
5777 | |
5778 | req.egress_port = cpu_to_le16(egress_port); | |
46a3df9f S |
5779 | |
5780 | hclge_prepare_mac_addr(&req, addr); | |
5781 | ||
bf88f41f JS |
5782 | /* Lookup the mac address in the mac_vlan table, and add |
5783 | * it if the entry is inexistent. Repeated unicast entry | |
5784 | * is not allowed in the mac vlan table. | |
5785 | */ | |
5786 | ret = hclge_lookup_mac_vlan_tbl(vport, &req, &desc, false); | |
2da5ec58 JS |
5787 | if (ret == -ENOENT) { |
5788 | if (!hclge_is_umv_space_full(vport)) { | |
5789 | ret = hclge_add_mac_vlan_tbl(vport, &req, NULL); | |
5790 | if (!ret) | |
5791 | hclge_update_umv_space(vport, false); | |
5792 | return ret; | |
5793 | } | |
5794 | ||
5795 | dev_err(&hdev->pdev->dev, "UC MAC table full(%u)\n", | |
5796 | hdev->priv_umv_size); | |
5797 | ||
5798 | return -ENOSPC; | |
5799 | } | |
bf88f41f JS |
5800 | |
5801 | /* check if we just hit the duplicate */ | |
5802 | if (!ret) | |
5803 | ret = -EINVAL; | |
5804 | ||
5805 | dev_err(&hdev->pdev->dev, | |
5806 | "PF failed to add unicast entry(%pM) in the MAC table\n", | |
5807 | addr); | |
46a3df9f | 5808 | |
04f0c72a | 5809 | return ret; |
46a3df9f S |
5810 | } |
5811 | ||
5812 | static int hclge_rm_uc_addr(struct hnae3_handle *handle, | |
5813 | const unsigned char *addr) | |
5814 | { | |
5815 | struct hclge_vport *vport = hclge_get_vport(handle); | |
5816 | ||
5817 | return hclge_rm_uc_addr_common(vport, addr); | |
5818 | } | |
5819 | ||
5820 | int hclge_rm_uc_addr_common(struct hclge_vport *vport, | |
5821 | const unsigned char *addr) | |
5822 | { | |
5823 | struct hclge_dev *hdev = vport->back; | |
d44f9b63 | 5824 | struct hclge_mac_vlan_tbl_entry_cmd req; |
04f0c72a | 5825 | int ret; |
46a3df9f S |
5826 | |
5827 | /* mac addr check */ | |
5828 | if (is_zero_ether_addr(addr) || | |
5829 | is_broadcast_ether_addr(addr) || | |
5830 | is_multicast_ether_addr(addr)) { | |
5831 | dev_dbg(&hdev->pdev->dev, | |
5832 | "Remove mac err! invalid mac:%pM.\n", | |
5833 | addr); | |
5834 | return -EINVAL; | |
5835 | } | |
5836 | ||
5837 | memset(&req, 0, sizeof(req)); | |
ccc23ef3 PL |
5838 | hnae3_set_bit(req.flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1); |
5839 | hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0); | |
46a3df9f | 5840 | hclge_prepare_mac_addr(&req, addr); |
04f0c72a | 5841 | ret = hclge_remove_mac_vlan_tbl(vport, &req); |
2da5ec58 JS |
5842 | if (!ret) |
5843 | hclge_update_umv_space(vport, true); | |
46a3df9f | 5844 | |
04f0c72a | 5845 | return ret; |
46a3df9f S |
5846 | } |
5847 | ||
5848 | static int hclge_add_mc_addr(struct hnae3_handle *handle, | |
5849 | const unsigned char *addr) | |
5850 | { | |
5851 | struct hclge_vport *vport = hclge_get_vport(handle); | |
5852 | ||
2bf8098b | 5853 | return hclge_add_mc_addr_common(vport, addr); |
46a3df9f S |
5854 | } |
5855 | ||
5856 | int hclge_add_mc_addr_common(struct hclge_vport *vport, | |
5857 | const unsigned char *addr) | |
5858 | { | |
5859 | struct hclge_dev *hdev = vport->back; | |
d44f9b63 | 5860 | struct hclge_mac_vlan_tbl_entry_cmd req; |
46a3df9f | 5861 | struct hclge_desc desc[3]; |
46a3df9f S |
5862 | int status; |
5863 | ||
5864 | /* mac addr check */ | |
5865 | if (!is_multicast_ether_addr(addr)) { | |
5866 | dev_err(&hdev->pdev->dev, | |
5867 | "Add mc mac err! invalid mac:%pM.\n", | |
5868 | addr); | |
5869 | return -EINVAL; | |
5870 | } | |
5871 | memset(&req, 0, sizeof(req)); | |
ccc23ef3 PL |
5872 | hnae3_set_bit(req.flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1); |
5873 | hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0); | |
5874 | hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT1_EN_B, 1); | |
738a3401 | 5875 | hnae3_set_bit(req.mc_mac_en, HCLGE_MAC_VLAN_BIT0_EN_B, 1); |
46a3df9f S |
5876 | hclge_prepare_mac_addr(&req, addr); |
5877 | status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true); | |
5878 | if (!status) { | |
5879 | /* This mac addr exist, update VFID for it */ | |
5880 | hclge_update_desc_vfid(desc, vport->vport_id, false); | |
5881 | status = hclge_add_mac_vlan_tbl(vport, &req, desc); | |
5882 | } else { | |
5883 | /* This mac addr do not exist, add new entry for it */ | |
5884 | memset(desc[0].data, 0, sizeof(desc[0].data)); | |
5885 | memset(desc[1].data, 0, sizeof(desc[0].data)); | |
5886 | memset(desc[2].data, 0, sizeof(desc[0].data)); | |
5887 | hclge_update_desc_vfid(desc, vport->vport_id, false); | |
5888 | status = hclge_add_mac_vlan_tbl(vport, &req, desc); | |
5889 | } | |
5890 | ||
55b049be JS |
5891 | if (status == -ENOSPC) |
5892 | dev_err(&hdev->pdev->dev, "mc mac vlan table is full\n"); | |
46a3df9f S |
5893 | |
5894 | return status; | |
5895 | } | |
5896 | ||
5897 | static int hclge_rm_mc_addr(struct hnae3_handle *handle, | |
5898 | const unsigned char *addr) | |
5899 | { | |
5900 | struct hclge_vport *vport = hclge_get_vport(handle); | |
5901 | ||
5902 | return hclge_rm_mc_addr_common(vport, addr); | |
5903 | } | |
5904 | ||
5905 | int hclge_rm_mc_addr_common(struct hclge_vport *vport, | |
5906 | const unsigned char *addr) | |
5907 | { | |
5908 | struct hclge_dev *hdev = vport->back; | |
d44f9b63 | 5909 | struct hclge_mac_vlan_tbl_entry_cmd req; |
46a3df9f S |
5910 | enum hclge_cmd_status status; |
5911 | struct hclge_desc desc[3]; | |
46a3df9f S |
5912 | |
5913 | /* mac addr check */ | |
5914 | if (!is_multicast_ether_addr(addr)) { | |
5915 | dev_dbg(&hdev->pdev->dev, | |
5916 | "Remove mc mac err! invalid mac:%pM.\n", | |
5917 | addr); | |
5918 | return -EINVAL; | |
5919 | } | |
5920 | ||
5921 | memset(&req, 0, sizeof(req)); | |
ccc23ef3 PL |
5922 | hnae3_set_bit(req.flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1); |
5923 | hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0); | |
5924 | hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT1_EN_B, 1); | |
738a3401 | 5925 | hnae3_set_bit(req.mc_mac_en, HCLGE_MAC_VLAN_BIT0_EN_B, 1); |
46a3df9f S |
5926 | hclge_prepare_mac_addr(&req, addr); |
5927 | status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true); | |
5928 | if (!status) { | |
5929 | /* This mac addr exist, remove this handle's VFID for it */ | |
5930 | hclge_update_desc_vfid(desc, vport->vport_id, true); | |
5931 | ||
5932 | if (hclge_is_all_function_id_zero(desc)) | |
5933 | /* All the vfid is zero, so need to delete this entry */ | |
5934 | status = hclge_remove_mac_vlan_tbl(vport, &req); | |
5935 | else | |
5936 | /* Not all the vfid is zero, update the vfid */ | |
5937 | status = hclge_add_mac_vlan_tbl(vport, &req, desc); | |
5938 | ||
5939 | } else { | |
a832d8b5 XW |
5940 | /* Maybe this mac address is in mta table, but it cannot be |
5941 | * deleted here because an entry of mta represents an address | |
5942 | * range rather than a specific address. the delete action to | |
5943 | * all entries will take effect in update_mta_status called by | |
5944 | * hns3_nic_set_rx_mode. | |
5945 | */ | |
5946 | status = 0; | |
46a3df9f S |
5947 | } |
5948 | ||
46a3df9f S |
5949 | return status; |
5950 | } | |
5951 | ||
635bfb58 FL |
5952 | static int hclge_get_mac_ethertype_cmd_status(struct hclge_dev *hdev, |
5953 | u16 cmdq_resp, u8 resp_code) | |
5954 | { | |
5955 | #define HCLGE_ETHERTYPE_SUCCESS_ADD 0 | |
5956 | #define HCLGE_ETHERTYPE_ALREADY_ADD 1 | |
5957 | #define HCLGE_ETHERTYPE_MGR_TBL_OVERFLOW 2 | |
5958 | #define HCLGE_ETHERTYPE_KEY_CONFLICT 3 | |
5959 | ||
5960 | int return_status; | |
5961 | ||
5962 | if (cmdq_resp) { | |
5963 | dev_err(&hdev->pdev->dev, | |
5964 | "cmdq execute failed for get_mac_ethertype_cmd_status, status=%d.\n", | |
5965 | cmdq_resp); | |
5966 | return -EIO; | |
5967 | } | |
5968 | ||
5969 | switch (resp_code) { | |
5970 | case HCLGE_ETHERTYPE_SUCCESS_ADD: | |
5971 | case HCLGE_ETHERTYPE_ALREADY_ADD: | |
5972 | return_status = 0; | |
5973 | break; | |
5974 | case HCLGE_ETHERTYPE_MGR_TBL_OVERFLOW: | |
5975 | dev_err(&hdev->pdev->dev, | |
5976 | "add mac ethertype failed for manager table overflow.\n"); | |
5977 | return_status = -EIO; | |
5978 | break; | |
5979 | case HCLGE_ETHERTYPE_KEY_CONFLICT: | |
5980 | dev_err(&hdev->pdev->dev, | |
5981 | "add mac ethertype failed for key conflict.\n"); | |
5982 | return_status = -EIO; | |
5983 | break; | |
5984 | default: | |
5985 | dev_err(&hdev->pdev->dev, | |
5986 | "add mac ethertype failed for undefined, code=%d.\n", | |
5987 | resp_code); | |
5988 | return_status = -EIO; | |
5989 | } | |
5990 | ||
5991 | return return_status; | |
5992 | } | |
5993 | ||
5994 | static int hclge_add_mgr_tbl(struct hclge_dev *hdev, | |
5995 | const struct hclge_mac_mgr_tbl_entry_cmd *req) | |
5996 | { | |
5997 | struct hclge_desc desc; | |
5998 | u8 resp_code; | |
5999 | u16 retval; | |
6000 | int ret; | |
6001 | ||
6002 | hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_ETHTYPE_ADD, false); | |
6003 | memcpy(desc.data, req, sizeof(struct hclge_mac_mgr_tbl_entry_cmd)); | |
6004 | ||
6005 | ret = hclge_cmd_send(&hdev->hw, &desc, 1); | |
6006 | if (ret) { | |
6007 | dev_err(&hdev->pdev->dev, | |
6008 | "add mac ethertype failed for cmd_send, ret =%d.\n", | |
6009 | ret); | |
6010 | return ret; | |
6011 | } | |
6012 | ||
6013 | resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff; | |
6014 | retval = le16_to_cpu(desc.retval); | |
6015 | ||
6016 | return hclge_get_mac_ethertype_cmd_status(hdev, retval, resp_code); | |
6017 | } | |
6018 | ||
6019 | static int init_mgr_tbl(struct hclge_dev *hdev) | |
6020 | { | |
6021 | int ret; | |
6022 | int i; | |
6023 | ||
6024 | for (i = 0; i < ARRAY_SIZE(hclge_mgr_table); i++) { | |
6025 | ret = hclge_add_mgr_tbl(hdev, &hclge_mgr_table[i]); | |
6026 | if (ret) { | |
6027 | dev_err(&hdev->pdev->dev, | |
6028 | "add mac ethertype failed, ret =%d.\n", | |
6029 | ret); | |
6030 | return ret; | |
6031 | } | |
6032 | } | |
6033 | ||
6034 | return 0; | |
6035 | } | |
6036 | ||
46a3df9f S |
6037 | static void hclge_get_mac_addr(struct hnae3_handle *handle, u8 *p) |
6038 | { | |
6039 | struct hclge_vport *vport = hclge_get_vport(handle); | |
6040 | struct hclge_dev *hdev = vport->back; | |
6041 | ||
6042 | ether_addr_copy(p, hdev->hw.mac.mac_addr); | |
6043 | } | |
6044 | ||
3cbf5e2d FL |
6045 | static int hclge_set_mac_addr(struct hnae3_handle *handle, void *p, |
6046 | bool is_first) | |
46a3df9f S |
6047 | { |
6048 | const unsigned char *new_addr = (const unsigned char *)p; | |
6049 | struct hclge_vport *vport = hclge_get_vport(handle); | |
6050 | struct hclge_dev *hdev = vport->back; | |
20a5c4c0 | 6051 | int ret; |
46a3df9f S |
6052 | |
6053 | /* mac addr check */ | |
6054 | if (is_zero_ether_addr(new_addr) || | |
6055 | is_broadcast_ether_addr(new_addr) || | |
6056 | is_multicast_ether_addr(new_addr)) { | |
6057 | dev_err(&hdev->pdev->dev, | |
6058 | "Change uc mac err! invalid mac:%p.\n", | |
6059 | new_addr); | |
6060 | return -EINVAL; | |
6061 | } | |
6062 | ||
3cbf5e2d | 6063 | if (!is_first && hclge_rm_uc_addr(handle, hdev->hw.mac.mac_addr)) |
20a5c4c0 | 6064 | dev_warn(&hdev->pdev->dev, |
3cbf5e2d | 6065 | "remove old uc mac address fail.\n"); |
46a3df9f | 6066 | |
20a5c4c0 FL |
6067 | ret = hclge_add_uc_addr(handle, new_addr); |
6068 | if (ret) { | |
6069 | dev_err(&hdev->pdev->dev, | |
6070 | "add uc mac address fail, ret =%d.\n", | |
6071 | ret); | |
6072 | ||
3cbf5e2d FL |
6073 | if (!is_first && |
6074 | hclge_add_uc_addr(handle, hdev->hw.mac.mac_addr)) | |
20a5c4c0 | 6075 | dev_err(&hdev->pdev->dev, |
3cbf5e2d | 6076 | "restore uc mac address fail.\n"); |
20a5c4c0 FL |
6077 | |
6078 | return -EIO; | |
46a3df9f S |
6079 | } |
6080 | ||
532fdd5e | 6081 | ret = hclge_pause_addr_cfg(hdev, new_addr); |
20a5c4c0 FL |
6082 | if (ret) { |
6083 | dev_err(&hdev->pdev->dev, | |
6084 | "configure mac pause address fail, ret =%d.\n", | |
6085 | ret); | |
6086 | return -EIO; | |
6087 | } | |
6088 | ||
6089 | ether_addr_copy(hdev->hw.mac.mac_addr, new_addr); | |
6090 | ||
6091 | return 0; | |
46a3df9f S |
6092 | } |
6093 | ||
a185d723 XW |
6094 | static int hclge_do_ioctl(struct hnae3_handle *handle, struct ifreq *ifr, |
6095 | int cmd) | |
6096 | { | |
6097 | struct hclge_vport *vport = hclge_get_vport(handle); | |
6098 | struct hclge_dev *hdev = vport->back; | |
6099 | ||
6100 | if (!hdev->hw.mac.phydev) | |
6101 | return -EOPNOTSUPP; | |
6102 | ||
6103 | return phy_mii_ioctl(hdev->hw.mac.phydev, ifr, cmd); | |
6104 | } | |
6105 | ||
46a3df9f | 6106 | static int hclge_set_vlan_filter_ctrl(struct hclge_dev *hdev, u8 vlan_type, |
0e44d430 | 6107 | u8 fe_type, bool filter_en) |
46a3df9f | 6108 | { |
d44f9b63 | 6109 | struct hclge_vlan_filter_ctrl_cmd *req; |
46a3df9f S |
6110 | struct hclge_desc desc; |
6111 | int ret; | |
6112 | ||
6113 | hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_FILTER_CTRL, false); | |
6114 | ||
d44f9b63 | 6115 | req = (struct hclge_vlan_filter_ctrl_cmd *)desc.data; |
46a3df9f | 6116 | req->vlan_type = vlan_type; |
0e44d430 | 6117 | req->vlan_fe = filter_en ? fe_type : 0; |
46a3df9f S |
6118 | |
6119 | ret = hclge_cmd_send(&hdev->hw, &desc, 1); | |
90415e85 | 6120 | if (ret) |
46a3df9f S |
6121 | dev_err(&hdev->pdev->dev, "set vlan filter fail, ret =%d.\n", |
6122 | ret); | |
46a3df9f | 6123 | |
90415e85 | 6124 | return ret; |
46a3df9f S |
6125 | } |
6126 | ||
d818396d JS |
6127 | #define HCLGE_FILTER_TYPE_VF 0 |
6128 | #define HCLGE_FILTER_TYPE_PORT 1 | |
0e44d430 ZL |
6129 | #define HCLGE_FILTER_FE_EGRESS_V1_B BIT(0) |
6130 | #define HCLGE_FILTER_FE_NIC_INGRESS_B BIT(0) | |
6131 | #define HCLGE_FILTER_FE_NIC_EGRESS_B BIT(1) | |
6132 | #define HCLGE_FILTER_FE_ROCE_INGRESS_B BIT(2) | |
6133 | #define HCLGE_FILTER_FE_ROCE_EGRESS_B BIT(3) | |
6134 | #define HCLGE_FILTER_FE_EGRESS (HCLGE_FILTER_FE_NIC_EGRESS_B \ | |
6135 | | HCLGE_FILTER_FE_ROCE_EGRESS_B) | |
6136 | #define HCLGE_FILTER_FE_INGRESS (HCLGE_FILTER_FE_NIC_INGRESS_B \ | |
6137 | | HCLGE_FILTER_FE_ROCE_INGRESS_B) | |
d818396d JS |
6138 | |
6139 | static void hclge_enable_vlan_filter(struct hnae3_handle *handle, bool enable) | |
6140 | { | |
6141 | struct hclge_vport *vport = hclge_get_vport(handle); | |
6142 | struct hclge_dev *hdev = vport->back; | |
6143 | ||
0e44d430 ZL |
6144 | if (hdev->pdev->revision >= 0x21) { |
6145 | hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF, | |
6146 | HCLGE_FILTER_FE_EGRESS, enable); | |
6147 | hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_PORT, | |
6148 | HCLGE_FILTER_FE_INGRESS, enable); | |
6149 | } else { | |
6150 | hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF, | |
6151 | HCLGE_FILTER_FE_EGRESS_V1_B, enable); | |
6152 | } | |
1e3653db JS |
6153 | if (enable) |
6154 | handle->netdev_flags |= HNAE3_VLAN_FLTR; | |
6155 | else | |
6156 | handle->netdev_flags &= ~HNAE3_VLAN_FLTR; | |
d818396d JS |
6157 | } |
6158 | ||
4e66632d YL |
6159 | static int hclge_set_vf_vlan_common(struct hclge_dev *hdev, int vfid, |
6160 | bool is_kill, u16 vlan, u8 qos, | |
6161 | __be16 proto) | |
46a3df9f S |
6162 | { |
6163 | #define HCLGE_MAX_VF_BYTES 16 | |
d44f9b63 YL |
6164 | struct hclge_vlan_filter_vf_cfg_cmd *req0; |
6165 | struct hclge_vlan_filter_vf_cfg_cmd *req1; | |
46a3df9f S |
6166 | struct hclge_desc desc[2]; |
6167 | u8 vf_byte_val; | |
6168 | u8 vf_byte_off; | |
6169 | int ret; | |
6170 | ||
6171 | hclge_cmd_setup_basic_desc(&desc[0], | |
6172 | HCLGE_OPC_VLAN_FILTER_VF_CFG, false); | |
6173 | hclge_cmd_setup_basic_desc(&desc[1], | |
6174 | HCLGE_OPC_VLAN_FILTER_VF_CFG, false); | |
6175 | ||
6176 | desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT); | |
6177 | ||
6178 | vf_byte_off = vfid / 8; | |
6179 | vf_byte_val = 1 << (vfid % 8); | |
6180 | ||
d44f9b63 YL |
6181 | req0 = (struct hclge_vlan_filter_vf_cfg_cmd *)desc[0].data; |
6182 | req1 = (struct hclge_vlan_filter_vf_cfg_cmd *)desc[1].data; | |
46a3df9f | 6183 | |
a90bb9a5 | 6184 | req0->vlan_id = cpu_to_le16(vlan); |
46a3df9f S |
6185 | req0->vlan_cfg = is_kill; |
6186 | ||
6187 | if (vf_byte_off < HCLGE_MAX_VF_BYTES) | |
6188 | req0->vf_bitmap[vf_byte_off] = vf_byte_val; | |
6189 | else | |
6190 | req1->vf_bitmap[vf_byte_off - HCLGE_MAX_VF_BYTES] = vf_byte_val; | |
6191 | ||
6192 | ret = hclge_cmd_send(&hdev->hw, desc, 2); | |
6193 | if (ret) { | |
6194 | dev_err(&hdev->pdev->dev, | |
6195 | "Send vf vlan command fail, ret =%d.\n", | |
6196 | ret); | |
6197 | return ret; | |
6198 | } | |
6199 | ||
6200 | if (!is_kill) { | |
715d610d | 6201 | #define HCLGE_VF_VLAN_NO_ENTRY 2 |
46a3df9f S |
6202 | if (!req0->resp_code || req0->resp_code == 1) |
6203 | return 0; | |
6204 | ||
715d610d YL |
6205 | if (req0->resp_code == HCLGE_VF_VLAN_NO_ENTRY) { |
6206 | dev_warn(&hdev->pdev->dev, | |
6207 | "vf vlan table is full, vf vlan filter is disabled\n"); | |
6208 | return 0; | |
6209 | } | |
6210 | ||
46a3df9f S |
6211 | dev_err(&hdev->pdev->dev, |
6212 | "Add vf vlan filter fail, ret =%d.\n", | |
6213 | req0->resp_code); | |
6214 | } else { | |
29d3a843 | 6215 | #define HCLGE_VF_VLAN_DEL_NO_FOUND 1 |
46a3df9f S |
6216 | if (!req0->resp_code) |
6217 | return 0; | |
6218 | ||
29d3a843 YL |
6219 | if (req0->resp_code == HCLGE_VF_VLAN_DEL_NO_FOUND) { |
6220 | dev_warn(&hdev->pdev->dev, | |
6221 | "vlan %d filter is not in vf vlan table\n", | |
6222 | vlan); | |
6223 | return 0; | |
6224 | } | |
6225 | ||
46a3df9f S |
6226 | dev_err(&hdev->pdev->dev, |
6227 | "Kill vf vlan filter fail, ret =%d.\n", | |
6228 | req0->resp_code); | |
6229 | } | |
6230 | ||
6231 | return -EIO; | |
6232 | } | |
6233 | ||
4e66632d YL |
6234 | static int hclge_set_port_vlan_filter(struct hclge_dev *hdev, __be16 proto, |
6235 | u16 vlan_id, bool is_kill) | |
46a3df9f | 6236 | { |
d44f9b63 | 6237 | struct hclge_vlan_filter_pf_cfg_cmd *req; |
46a3df9f S |
6238 | struct hclge_desc desc; |
6239 | u8 vlan_offset_byte_val; | |
6240 | u8 vlan_offset_byte; | |
6241 | u8 vlan_offset_160; | |
6242 | int ret; | |
6243 | ||
6244 | hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_FILTER_PF_CFG, false); | |
6245 | ||
6246 | vlan_offset_160 = vlan_id / 160; | |
6247 | vlan_offset_byte = (vlan_id % 160) / 8; | |
6248 | vlan_offset_byte_val = 1 << (vlan_id % 8); | |
6249 | ||
d44f9b63 | 6250 | req = (struct hclge_vlan_filter_pf_cfg_cmd *)desc.data; |
46a3df9f S |
6251 | req->vlan_offset = vlan_offset_160; |
6252 | req->vlan_cfg = is_kill; | |
6253 | req->vlan_offset_bitmap[vlan_offset_byte] = vlan_offset_byte_val; | |
6254 | ||
6255 | ret = hclge_cmd_send(&hdev->hw, &desc, 1); | |
4e66632d YL |
6256 | if (ret) |
6257 | dev_err(&hdev->pdev->dev, | |
6258 | "port vlan command, send fail, ret =%d.\n", ret); | |
6259 | return ret; | |
6260 | } | |
6261 | ||
6262 | static int hclge_set_vlan_filter_hw(struct hclge_dev *hdev, __be16 proto, | |
6263 | u16 vport_id, u16 vlan_id, u8 qos, | |
6264 | bool is_kill) | |
6265 | { | |
6266 | u16 vport_idx, vport_num = 0; | |
6267 | int ret; | |
6268 | ||
4935129c YL |
6269 | if (is_kill && !vlan_id) |
6270 | return 0; | |
6271 | ||
4e66632d YL |
6272 | ret = hclge_set_vf_vlan_common(hdev, vport_id, is_kill, vlan_id, |
6273 | 0, proto); | |
46a3df9f S |
6274 | if (ret) { |
6275 | dev_err(&hdev->pdev->dev, | |
4e66632d YL |
6276 | "Set %d vport vlan filter config fail, ret =%d.\n", |
6277 | vport_id, ret); | |
46a3df9f S |
6278 | return ret; |
6279 | } | |
6280 | ||
4e66632d YL |
6281 | /* vlan 0 may be added twice when 8021q module is enabled */ |
6282 | if (!is_kill && !vlan_id && | |
6283 | test_bit(vport_id, hdev->vlan_table[vlan_id])) | |
6284 | return 0; | |
6285 | ||
6286 | if (!is_kill && test_and_set_bit(vport_id, hdev->vlan_table[vlan_id])) { | |
46a3df9f | 6287 | dev_err(&hdev->pdev->dev, |
4e66632d YL |
6288 | "Add port vlan failed, vport %d is already in vlan %d\n", |
6289 | vport_id, vlan_id); | |
6290 | return -EINVAL; | |
46a3df9f S |
6291 | } |
6292 | ||
4e66632d YL |
6293 | if (is_kill && |
6294 | !test_and_clear_bit(vport_id, hdev->vlan_table[vlan_id])) { | |
6295 | dev_err(&hdev->pdev->dev, | |
6296 | "Delete port vlan failed, vport %d is not in vlan %d\n", | |
6297 | vport_id, vlan_id); | |
6298 | return -EINVAL; | |
6299 | } | |
6300 | ||
3c6d4f43 | 6301 | for_each_set_bit(vport_idx, hdev->vlan_table[vlan_id], HCLGE_VPORT_NUM) |
4e66632d YL |
6302 | vport_num++; |
6303 | ||
6304 | if ((is_kill && vport_num == 0) || (!is_kill && vport_num == 1)) | |
6305 | ret = hclge_set_port_vlan_filter(hdev, proto, vlan_id, | |
6306 | is_kill); | |
6307 | ||
6308 | return ret; | |
6309 | } | |
6310 | ||
6311 | int hclge_set_vlan_filter(struct hnae3_handle *handle, __be16 proto, | |
6312 | u16 vlan_id, bool is_kill) | |
6313 | { | |
6314 | struct hclge_vport *vport = hclge_get_vport(handle); | |
6315 | struct hclge_dev *hdev = vport->back; | |
6316 | ||
6317 | return hclge_set_vlan_filter_hw(hdev, proto, vport->vport_id, vlan_id, | |
6318 | 0, is_kill); | |
46a3df9f S |
6319 | } |
6320 | ||
6321 | static int hclge_set_vf_vlan_filter(struct hnae3_handle *handle, int vfid, | |
6322 | u16 vlan, u8 qos, __be16 proto) | |
6323 | { | |
6324 | struct hclge_vport *vport = hclge_get_vport(handle); | |
6325 | struct hclge_dev *hdev = vport->back; | |
6326 | ||
6327 | if ((vfid >= hdev->num_alloc_vfs) || (vlan > 4095) || (qos > 7)) | |
6328 | return -EINVAL; | |
6329 | if (proto != htons(ETH_P_8021Q)) | |
6330 | return -EPROTONOSUPPORT; | |
6331 | ||
4e66632d | 6332 | return hclge_set_vlan_filter_hw(hdev, proto, vfid, vlan, qos, false); |
46a3df9f S |
6333 | } |
6334 | ||
e62f2a6b PL |
6335 | static int hclge_set_vlan_tx_offload_cfg(struct hclge_vport *vport) |
6336 | { | |
6337 | struct hclge_tx_vtag_cfg *vcfg = &vport->txvlan_cfg; | |
6338 | struct hclge_vport_vtag_tx_cfg_cmd *req; | |
6339 | struct hclge_dev *hdev = vport->back; | |
6340 | struct hclge_desc desc; | |
6341 | int status; | |
6342 | ||
6343 | hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_PORT_TX_CFG, false); | |
6344 | ||
6345 | req = (struct hclge_vport_vtag_tx_cfg_cmd *)desc.data; | |
6346 | req->def_vlan_tag1 = cpu_to_le16(vcfg->default_tag1); | |
6347 | req->def_vlan_tag2 = cpu_to_le16(vcfg->default_tag2); | |
ccc23ef3 PL |
6348 | hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_TAG1_B, |
6349 | vcfg->accept_tag1 ? 1 : 0); | |
6350 | hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_UNTAG1_B, | |
6351 | vcfg->accept_untag1 ? 1 : 0); | |
6352 | hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_TAG2_B, | |
6353 | vcfg->accept_tag2 ? 1 : 0); | |
6354 | hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_UNTAG2_B, | |
6355 | vcfg->accept_untag2 ? 1 : 0); | |
6356 | hnae3_set_bit(req->vport_vlan_cfg, HCLGE_PORT_INS_TAG1_EN_B, | |
6357 | vcfg->insert_tag1_en ? 1 : 0); | |
6358 | hnae3_set_bit(req->vport_vlan_cfg, HCLGE_PORT_INS_TAG2_EN_B, | |
6359 | vcfg->insert_tag2_en ? 1 : 0); | |
6360 | hnae3_set_bit(req->vport_vlan_cfg, HCLGE_CFG_NIC_ROCE_SEL_B, 0); | |
e62f2a6b PL |
6361 | |
6362 | req->vf_offset = vport->vport_id / HCLGE_VF_NUM_PER_CMD; | |
6363 | req->vf_bitmap[req->vf_offset] = | |
6364 | 1 << (vport->vport_id % HCLGE_VF_NUM_PER_BYTE); | |
6365 | ||
6366 | status = hclge_cmd_send(&hdev->hw, &desc, 1); | |
6367 | if (status) | |
6368 | dev_err(&hdev->pdev->dev, | |
6369 | "Send port txvlan cfg command fail, ret =%d\n", | |
6370 | status); | |
6371 | ||
6372 | return status; | |
6373 | } | |
6374 | ||
6375 | static int hclge_set_vlan_rx_offload_cfg(struct hclge_vport *vport) | |
6376 | { | |
6377 | struct hclge_rx_vtag_cfg *vcfg = &vport->rxvlan_cfg; | |
6378 | struct hclge_vport_vtag_rx_cfg_cmd *req; | |
6379 | struct hclge_dev *hdev = vport->back; | |
6380 | struct hclge_desc desc; | |
6381 | int status; | |
6382 | ||
6383 | hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_PORT_RX_CFG, false); | |
6384 | ||
6385 | req = (struct hclge_vport_vtag_rx_cfg_cmd *)desc.data; | |
ccc23ef3 PL |
6386 | hnae3_set_bit(req->vport_vlan_cfg, HCLGE_REM_TAG1_EN_B, |
6387 | vcfg->strip_tag1_en ? 1 : 0); | |
6388 | hnae3_set_bit(req->vport_vlan_cfg, HCLGE_REM_TAG2_EN_B, | |
6389 | vcfg->strip_tag2_en ? 1 : 0); | |
6390 | hnae3_set_bit(req->vport_vlan_cfg, HCLGE_SHOW_TAG1_EN_B, | |
6391 | vcfg->vlan1_vlan_prionly ? 1 : 0); | |
6392 | hnae3_set_bit(req->vport_vlan_cfg, HCLGE_SHOW_TAG2_EN_B, | |
6393 | vcfg->vlan2_vlan_prionly ? 1 : 0); | |
e62f2a6b PL |
6394 | |
6395 | req->vf_offset = vport->vport_id / HCLGE_VF_NUM_PER_CMD; | |
6396 | req->vf_bitmap[req->vf_offset] = | |
6397 | 1 << (vport->vport_id % HCLGE_VF_NUM_PER_BYTE); | |
6398 | ||
6399 | status = hclge_cmd_send(&hdev->hw, &desc, 1); | |
6400 | if (status) | |
6401 | dev_err(&hdev->pdev->dev, | |
6402 | "Send port rxvlan cfg command fail, ret =%d\n", | |
6403 | status); | |
6404 | ||
6405 | return status; | |
6406 | } | |
6407 | ||
6408 | static int hclge_set_vlan_protocol_type(struct hclge_dev *hdev) | |
6409 | { | |
6410 | struct hclge_rx_vlan_type_cfg_cmd *rx_req; | |
6411 | struct hclge_tx_vlan_type_cfg_cmd *tx_req; | |
6412 | struct hclge_desc desc; | |
6413 | int status; | |
6414 | ||
6415 | hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_TYPE_ID, false); | |
6416 | rx_req = (struct hclge_rx_vlan_type_cfg_cmd *)desc.data; | |
6417 | rx_req->ot_fst_vlan_type = | |
6418 | cpu_to_le16(hdev->vlan_type_cfg.rx_ot_fst_vlan_type); | |
6419 | rx_req->ot_sec_vlan_type = | |
6420 | cpu_to_le16(hdev->vlan_type_cfg.rx_ot_sec_vlan_type); | |
6421 | rx_req->in_fst_vlan_type = | |
6422 | cpu_to_le16(hdev->vlan_type_cfg.rx_in_fst_vlan_type); | |
6423 | rx_req->in_sec_vlan_type = | |
6424 | cpu_to_le16(hdev->vlan_type_cfg.rx_in_sec_vlan_type); | |
6425 | ||
6426 | status = hclge_cmd_send(&hdev->hw, &desc, 1); | |
6427 | if (status) { | |
6428 | dev_err(&hdev->pdev->dev, | |
6429 | "Send rxvlan protocol type command fail, ret =%d\n", | |
6430 | status); | |
6431 | return status; | |
6432 | } | |
6433 | ||
6434 | hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_INSERT, false); | |
6435 | ||
855f03fb | 6436 | tx_req = (struct hclge_tx_vlan_type_cfg_cmd *)desc.data; |
e62f2a6b PL |
6437 | tx_req->ot_vlan_type = cpu_to_le16(hdev->vlan_type_cfg.tx_ot_vlan_type); |
6438 | tx_req->in_vlan_type = cpu_to_le16(hdev->vlan_type_cfg.tx_in_vlan_type); | |
6439 | ||
6440 | status = hclge_cmd_send(&hdev->hw, &desc, 1); | |
6441 | if (status) | |
6442 | dev_err(&hdev->pdev->dev, | |
6443 | "Send txvlan protocol type command fail, ret =%d\n", | |
6444 | status); | |
6445 | ||
6446 | return status; | |
6447 | } | |
6448 | ||
46a3df9f S |
6449 | static int hclge_init_vlan_config(struct hclge_dev *hdev) |
6450 | { | |
e62f2a6b PL |
6451 | #define HCLGE_DEF_VLAN_TYPE 0x8100 |
6452 | ||
1e3653db | 6453 | struct hnae3_handle *handle = &hdev->vport[0].nic; |
e62f2a6b | 6454 | struct hclge_vport *vport; |
46a3df9f | 6455 | int ret; |
e62f2a6b PL |
6456 | int i; |
6457 | ||
0e44d430 ZL |
6458 | if (hdev->pdev->revision >= 0x21) { |
6459 | ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF, | |
6460 | HCLGE_FILTER_FE_EGRESS, true); | |
6461 | if (ret) | |
6462 | return ret; | |
46a3df9f | 6463 | |
0e44d430 ZL |
6464 | ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_PORT, |
6465 | HCLGE_FILTER_FE_INGRESS, true); | |
6466 | if (ret) | |
6467 | return ret; | |
6468 | } else { | |
6469 | ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF, | |
6470 | HCLGE_FILTER_FE_EGRESS_V1_B, | |
6471 | true); | |
6472 | if (ret) | |
6473 | return ret; | |
6474 | } | |
46a3df9f | 6475 | |
1e3653db JS |
6476 | handle->netdev_flags |= HNAE3_VLAN_FLTR; |
6477 | ||
e62f2a6b PL |
6478 | hdev->vlan_type_cfg.rx_in_fst_vlan_type = HCLGE_DEF_VLAN_TYPE; |
6479 | hdev->vlan_type_cfg.rx_in_sec_vlan_type = HCLGE_DEF_VLAN_TYPE; | |
6480 | hdev->vlan_type_cfg.rx_ot_fst_vlan_type = HCLGE_DEF_VLAN_TYPE; | |
6481 | hdev->vlan_type_cfg.rx_ot_sec_vlan_type = HCLGE_DEF_VLAN_TYPE; | |
6482 | hdev->vlan_type_cfg.tx_ot_vlan_type = HCLGE_DEF_VLAN_TYPE; | |
6483 | hdev->vlan_type_cfg.tx_in_vlan_type = HCLGE_DEF_VLAN_TYPE; | |
6484 | ||
6485 | ret = hclge_set_vlan_protocol_type(hdev); | |
5e43aef8 L |
6486 | if (ret) |
6487 | return ret; | |
46a3df9f | 6488 | |
e62f2a6b PL |
6489 | for (i = 0; i < hdev->num_alloc_vport; i++) { |
6490 | vport = &hdev->vport[i]; | |
b75b1a56 PL |
6491 | vport->txvlan_cfg.accept_tag1 = true; |
6492 | vport->txvlan_cfg.accept_untag1 = true; | |
6493 | ||
6494 | /* accept_tag2 and accept_untag2 are not supported on | |
6495 | * pdev revision(0x20), new revision support them. The | |
6496 | * value of this two fields will not return error when driver | |
6497 | * send command to fireware in revision(0x20). | |
6498 | * This two fields can not configured by user. | |
6499 | */ | |
6500 | vport->txvlan_cfg.accept_tag2 = true; | |
6501 | vport->txvlan_cfg.accept_untag2 = true; | |
6502 | ||
e62f2a6b PL |
6503 | vport->txvlan_cfg.insert_tag1_en = false; |
6504 | vport->txvlan_cfg.insert_tag2_en = false; | |
6505 | vport->txvlan_cfg.default_tag1 = 0; | |
6506 | vport->txvlan_cfg.default_tag2 = 0; | |
6507 | ||
6508 | ret = hclge_set_vlan_tx_offload_cfg(vport); | |
6509 | if (ret) | |
6510 | return ret; | |
6511 | ||
6512 | vport->rxvlan_cfg.strip_tag1_en = false; | |
6513 | vport->rxvlan_cfg.strip_tag2_en = true; | |
6514 | vport->rxvlan_cfg.vlan1_vlan_prionly = false; | |
6515 | vport->rxvlan_cfg.vlan2_vlan_prionly = false; | |
6516 | ||
6517 | ret = hclge_set_vlan_rx_offload_cfg(vport); | |
6518 | if (ret) | |
6519 | return ret; | |
6520 | } | |
6521 | ||
4e66632d | 6522 | return hclge_set_vlan_filter(handle, htons(ETH_P_8021Q), 0, false); |
46a3df9f S |
6523 | } |
6524 | ||
3849d494 | 6525 | int hclge_en_hw_strip_rxvtag(struct hnae3_handle *handle, bool enable) |
5f9a7732 PL |
6526 | { |
6527 | struct hclge_vport *vport = hclge_get_vport(handle); | |
6528 | ||
6529 | vport->rxvlan_cfg.strip_tag1_en = false; | |
6530 | vport->rxvlan_cfg.strip_tag2_en = enable; | |
6531 | vport->rxvlan_cfg.vlan1_vlan_prionly = false; | |
6532 | vport->rxvlan_cfg.vlan2_vlan_prionly = false; | |
6533 | ||
6534 | return hclge_set_vlan_rx_offload_cfg(vport); | |
6535 | } | |
6536 | ||
4ee09281 | 6537 | static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mps) |
46a3df9f | 6538 | { |
d44f9b63 | 6539 | struct hclge_config_max_frm_size_cmd *req; |
46a3df9f | 6540 | struct hclge_desc desc; |
46a3df9f | 6541 | |
46a3df9f S |
6542 | hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAX_FRM_SIZE, false); |
6543 | ||
d44f9b63 | 6544 | req = (struct hclge_config_max_frm_size_cmd *)desc.data; |
4ee09281 | 6545 | req->max_frm_size = cpu_to_le16(new_mps); |
b86fdbf3 | 6546 | req->min_frm_size = HCLGE_MAC_MIN_FRAME; |
46a3df9f | 6547 | |
4ee09281 | 6548 | return hclge_cmd_send(&hdev->hw, &desc, 1); |
46a3df9f S |
6549 | } |
6550 | ||
12341881 FL |
6551 | static int hclge_set_mtu(struct hnae3_handle *handle, int new_mtu) |
6552 | { | |
6553 | struct hclge_vport *vport = hclge_get_vport(handle); | |
b2c04029 YL |
6554 | |
6555 | return hclge_set_vport_mtu(vport, new_mtu); | |
6556 | } | |
6557 | ||
6558 | int hclge_set_vport_mtu(struct hclge_vport *vport, int new_mtu) | |
6559 | { | |
12341881 | 6560 | struct hclge_dev *hdev = vport->back; |
b2c04029 | 6561 | int i, max_frm_size, ret = 0; |
12341881 | 6562 | |
4ee09281 YL |
6563 | max_frm_size = new_mtu + ETH_HLEN + ETH_FCS_LEN + 2 * VLAN_HLEN; |
6564 | if (max_frm_size < HCLGE_MAC_MIN_FRAME || | |
6565 | max_frm_size > HCLGE_MAC_MAX_FRAME) | |
6566 | return -EINVAL; | |
6567 | ||
b2c04029 YL |
6568 | max_frm_size = max(max_frm_size, HCLGE_MAC_DEFAULT_FRAME); |
6569 | mutex_lock(&hdev->vport_lock); | |
6570 | /* VF's mps must fit within hdev->mps */ | |
6571 | if (vport->vport_id && max_frm_size > hdev->mps) { | |
6572 | mutex_unlock(&hdev->vport_lock); | |
6573 | return -EINVAL; | |
6574 | } else if (vport->vport_id) { | |
6575 | vport->mps = max_frm_size; | |
6576 | mutex_unlock(&hdev->vport_lock); | |
6577 | return 0; | |
6578 | } | |
6579 | ||
6580 | /* PF's mps must be greater then VF's mps */ | |
6581 | for (i = 1; i < hdev->num_alloc_vport; i++) | |
6582 | if (max_frm_size < hdev->vport[i].mps) { | |
6583 | mutex_unlock(&hdev->vport_lock); | |
6584 | return -EINVAL; | |
6585 | } | |
6586 | ||
268868f8 YL |
6587 | hclge_notify_client(hdev, HNAE3_DOWN_CLIENT); |
6588 | ||
4ee09281 | 6589 | ret = hclge_set_mac_mtu(hdev, max_frm_size); |
12341881 FL |
6590 | if (ret) { |
6591 | dev_err(&hdev->pdev->dev, | |
6592 | "Change mtu fail, ret =%d\n", ret); | |
b2c04029 | 6593 | goto out; |
12341881 FL |
6594 | } |
6595 | ||
4ee09281 | 6596 | hdev->mps = max_frm_size; |
b2c04029 | 6597 | vport->mps = max_frm_size; |
4ee09281 | 6598 | |
12341881 FL |
6599 | ret = hclge_buffer_alloc(hdev); |
6600 | if (ret) | |
6601 | dev_err(&hdev->pdev->dev, | |
6602 | "Allocate buffer fail, ret =%d\n", ret); | |
6603 | ||
b2c04029 | 6604 | out: |
268868f8 | 6605 | hclge_notify_client(hdev, HNAE3_UP_CLIENT); |
b2c04029 | 6606 | mutex_unlock(&hdev->vport_lock); |
12341881 FL |
6607 | return ret; |
6608 | } | |
6609 | ||
46a3df9f S |
6610 | static int hclge_send_reset_tqp_cmd(struct hclge_dev *hdev, u16 queue_id, |
6611 | bool enable) | |
6612 | { | |
d44f9b63 | 6613 | struct hclge_reset_tqp_queue_cmd *req; |
46a3df9f S |
6614 | struct hclge_desc desc; |
6615 | int ret; | |
6616 | ||
6617 | hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RESET_TQP_QUEUE, false); | |
6618 | ||
d44f9b63 | 6619 | req = (struct hclge_reset_tqp_queue_cmd *)desc.data; |
46a3df9f | 6620 | req->tqp_id = cpu_to_le16(queue_id & HCLGE_RING_ID_MASK); |
ccc23ef3 | 6621 | hnae3_set_bit(req->reset_req, HCLGE_TQP_RESET_B, enable); |
46a3df9f S |
6622 | |
6623 | ret = hclge_cmd_send(&hdev->hw, &desc, 1); | |
6624 | if (ret) { | |
6625 | dev_err(&hdev->pdev->dev, | |
6626 | "Send tqp reset cmd error, status =%d\n", ret); | |
6627 | return ret; | |
6628 | } | |
6629 | ||
6630 | return 0; | |
6631 | } | |
6632 | ||
6633 | static int hclge_get_reset_status(struct hclge_dev *hdev, u16 queue_id) | |
6634 | { | |
d44f9b63 | 6635 | struct hclge_reset_tqp_queue_cmd *req; |
46a3df9f S |
6636 | struct hclge_desc desc; |
6637 | int ret; | |
6638 | ||
6639 | hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RESET_TQP_QUEUE, true); | |
6640 | ||
d44f9b63 | 6641 | req = (struct hclge_reset_tqp_queue_cmd *)desc.data; |
46a3df9f S |
6642 | req->tqp_id = cpu_to_le16(queue_id & HCLGE_RING_ID_MASK); |
6643 | ||
6644 | ret = hclge_cmd_send(&hdev->hw, &desc, 1); | |
6645 | if (ret) { | |
6646 | dev_err(&hdev->pdev->dev, | |
6647 | "Get reset status error, status =%d\n", ret); | |
6648 | return ret; | |
6649 | } | |
6650 | ||
ccc23ef3 | 6651 | return hnae3_get_bit(req->ready_to_reset, HCLGE_TQP_RESET_B); |
46a3df9f S |
6652 | } |
6653 | ||
89d6386f | 6654 | u16 hclge_covert_handle_qid_global(struct hnae3_handle *handle, u16 queue_id) |
e5e89cda PL |
6655 | { |
6656 | struct hnae3_queue *queue; | |
6657 | struct hclge_tqp *tqp; | |
6658 | ||
6659 | queue = handle->kinfo.tqp[queue_id]; | |
6660 | tqp = container_of(queue, struct hclge_tqp, q); | |
6661 | ||
6662 | return tqp->index; | |
6663 | } | |
6664 | ||
abe62a63 | 6665 | int hclge_reset_tqp(struct hnae3_handle *handle, u16 queue_id) |
46a3df9f S |
6666 | { |
6667 | struct hclge_vport *vport = hclge_get_vport(handle); | |
6668 | struct hclge_dev *hdev = vport->back; | |
6669 | int reset_try_times = 0; | |
6670 | int reset_status; | |
e5e89cda | 6671 | u16 queue_gid; |
abe62a63 | 6672 | int ret = 0; |
46a3df9f | 6673 | |
e5e89cda PL |
6674 | queue_gid = hclge_covert_handle_qid_global(handle, queue_id); |
6675 | ||
46a3df9f S |
6676 | ret = hclge_tqp_enable(hdev, queue_id, 0, false); |
6677 | if (ret) { | |
abe62a63 HT |
6678 | dev_err(&hdev->pdev->dev, "Disable tqp fail, ret = %d\n", ret); |
6679 | return ret; | |
46a3df9f S |
6680 | } |
6681 | ||
e5e89cda | 6682 | ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, true); |
46a3df9f | 6683 | if (ret) { |
abe62a63 HT |
6684 | dev_err(&hdev->pdev->dev, |
6685 | "Send reset tqp cmd fail, ret = %d\n", ret); | |
6686 | return ret; | |
46a3df9f S |
6687 | } |
6688 | ||
6689 | reset_try_times = 0; | |
6690 | while (reset_try_times++ < HCLGE_TQP_RESET_TRY_TIMES) { | |
6691 | /* Wait for tqp hw reset */ | |
6692 | msleep(20); | |
e5e89cda | 6693 | reset_status = hclge_get_reset_status(hdev, queue_gid); |
46a3df9f S |
6694 | if (reset_status) |
6695 | break; | |
6696 | } | |
6697 | ||
6698 | if (reset_try_times >= HCLGE_TQP_RESET_TRY_TIMES) { | |
abe62a63 HT |
6699 | dev_err(&hdev->pdev->dev, "Reset TQP fail\n"); |
6700 | return ret; | |
46a3df9f S |
6701 | } |
6702 | ||
e5e89cda | 6703 | ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, false); |
abe62a63 HT |
6704 | if (ret) |
6705 | dev_err(&hdev->pdev->dev, | |
6706 | "Deassert the soft reset fail, ret = %d\n", ret); | |
6707 | ||
6708 | return ret; | |
46a3df9f S |
6709 | } |
6710 | ||
d3ea7fc4 PL |
6711 | void hclge_reset_vf_queue(struct hclge_vport *vport, u16 queue_id) |
6712 | { | |
6713 | struct hclge_dev *hdev = vport->back; | |
6714 | int reset_try_times = 0; | |
6715 | int reset_status; | |
6716 | u16 queue_gid; | |
6717 | int ret; | |
6718 | ||
6719 | queue_gid = hclge_covert_handle_qid_global(&vport->nic, queue_id); | |
6720 | ||
6721 | ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, true); | |
6722 | if (ret) { | |
6723 | dev_warn(&hdev->pdev->dev, | |
6724 | "Send reset tqp cmd fail, ret = %d\n", ret); | |
6725 | return; | |
6726 | } | |
6727 | ||
6728 | reset_try_times = 0; | |
6729 | while (reset_try_times++ < HCLGE_TQP_RESET_TRY_TIMES) { | |
6730 | /* Wait for tqp hw reset */ | |
6731 | msleep(20); | |
6732 | reset_status = hclge_get_reset_status(hdev, queue_gid); | |
6733 | if (reset_status) | |
6734 | break; | |
6735 | } | |
6736 | ||
6737 | if (reset_try_times >= HCLGE_TQP_RESET_TRY_TIMES) { | |
6738 | dev_warn(&hdev->pdev->dev, "Reset TQP fail\n"); | |
6739 | return; | |
6740 | } | |
6741 | ||
6742 | ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, false); | |
6743 | if (ret) | |
6744 | dev_warn(&hdev->pdev->dev, | |
6745 | "Deassert the soft reset fail, ret = %d\n", ret); | |
6746 | } | |
6747 | ||
46a3df9f S |
6748 | static u32 hclge_get_fw_version(struct hnae3_handle *handle) |
6749 | { | |
6750 | struct hclge_vport *vport = hclge_get_vport(handle); | |
6751 | struct hclge_dev *hdev = vport->back; | |
6752 | ||
6753 | return hdev->fw_version; | |
6754 | } | |
6755 | ||
09ea401e PL |
6756 | static void hclge_set_flowctrl_adv(struct hclge_dev *hdev, u32 rx_en, u32 tx_en) |
6757 | { | |
6758 | struct phy_device *phydev = hdev->hw.mac.phydev; | |
6759 | ||
6760 | if (!phydev) | |
6761 | return; | |
6762 | ||
6763 | phydev->advertising &= ~(ADVERTISED_Pause | ADVERTISED_Asym_Pause); | |
6764 | ||
6765 | if (rx_en) | |
6766 | phydev->advertising |= ADVERTISED_Pause | ADVERTISED_Asym_Pause; | |
6767 | ||
6768 | if (tx_en) | |
6769 | phydev->advertising ^= ADVERTISED_Asym_Pause; | |
6770 | } | |
6771 | ||
6772 | static int hclge_cfg_pauseparam(struct hclge_dev *hdev, u32 rx_en, u32 tx_en) | |
6773 | { | |
09ea401e PL |
6774 | int ret; |
6775 | ||
6776 | if (rx_en && tx_en) | |
7a28a82a | 6777 | hdev->fc_mode_last_time = HCLGE_FC_FULL; |
09ea401e | 6778 | else if (rx_en && !tx_en) |
7a28a82a | 6779 | hdev->fc_mode_last_time = HCLGE_FC_RX_PAUSE; |
09ea401e | 6780 | else if (!rx_en && tx_en) |
7a28a82a | 6781 | hdev->fc_mode_last_time = HCLGE_FC_TX_PAUSE; |
09ea401e | 6782 | else |
7a28a82a | 6783 | hdev->fc_mode_last_time = HCLGE_FC_NONE; |
09ea401e | 6784 | |
7a28a82a | 6785 | if (hdev->tm_info.fc_mode == HCLGE_FC_PFC) |
09ea401e | 6786 | return 0; |
09ea401e PL |
6787 | |
6788 | ret = hclge_mac_pause_en_cfg(hdev, tx_en, rx_en); | |
6789 | if (ret) { | |
6790 | dev_err(&hdev->pdev->dev, "configure pauseparam error, ret = %d.\n", | |
6791 | ret); | |
6792 | return ret; | |
6793 | } | |
6794 | ||
7a28a82a | 6795 | hdev->tm_info.fc_mode = hdev->fc_mode_last_time; |
09ea401e PL |
6796 | |
6797 | return 0; | |
6798 | } | |
6799 | ||
6282f2ea PL |
6800 | int hclge_cfg_flowctrl(struct hclge_dev *hdev) |
6801 | { | |
6802 | struct phy_device *phydev = hdev->hw.mac.phydev; | |
6803 | u16 remote_advertising = 0; | |
6804 | u16 local_advertising = 0; | |
6805 | u32 rx_pause, tx_pause; | |
6806 | u8 flowctl; | |
6807 | ||
6808 | if (!phydev->link || !phydev->autoneg) | |
6809 | return 0; | |
6810 | ||
6811 | if (phydev->advertising & ADVERTISED_Pause) | |
6812 | local_advertising = ADVERTISE_PAUSE_CAP; | |
6813 | ||
6814 | if (phydev->advertising & ADVERTISED_Asym_Pause) | |
6815 | local_advertising |= ADVERTISE_PAUSE_ASYM; | |
6816 | ||
6817 | if (phydev->pause) | |
6818 | remote_advertising = LPA_PAUSE_CAP; | |
6819 | ||
6820 | if (phydev->asym_pause) | |
6821 | remote_advertising |= LPA_PAUSE_ASYM; | |
6822 | ||
6823 | flowctl = mii_resolve_flowctrl_fdx(local_advertising, | |
6824 | remote_advertising); | |
6825 | tx_pause = flowctl & FLOW_CTRL_TX; | |
6826 | rx_pause = flowctl & FLOW_CTRL_RX; | |
6827 | ||
6828 | if (phydev->duplex == HCLGE_MAC_HALF) { | |
6829 | tx_pause = 0; | |
6830 | rx_pause = 0; | |
6831 | } | |
6832 | ||
6833 | return hclge_cfg_pauseparam(hdev, rx_pause, tx_pause); | |
6834 | } | |
6835 | ||
46a3df9f S |
6836 | static void hclge_get_pauseparam(struct hnae3_handle *handle, u32 *auto_neg, |
6837 | u32 *rx_en, u32 *tx_en) | |
6838 | { | |
6839 | struct hclge_vport *vport = hclge_get_vport(handle); | |
6840 | struct hclge_dev *hdev = vport->back; | |
6841 | ||
6842 | *auto_neg = hclge_get_autoneg(handle); | |
6843 | ||
6844 | if (hdev->tm_info.fc_mode == HCLGE_FC_PFC) { | |
6845 | *rx_en = 0; | |
6846 | *tx_en = 0; | |
6847 | return; | |
6848 | } | |
6849 | ||
6850 | if (hdev->tm_info.fc_mode == HCLGE_FC_RX_PAUSE) { | |
6851 | *rx_en = 1; | |
6852 | *tx_en = 0; | |
6853 | } else if (hdev->tm_info.fc_mode == HCLGE_FC_TX_PAUSE) { | |
6854 | *tx_en = 1; | |
6855 | *rx_en = 0; | |
6856 | } else if (hdev->tm_info.fc_mode == HCLGE_FC_FULL) { | |
6857 | *rx_en = 1; | |
6858 | *tx_en = 1; | |
6859 | } else { | |
6860 | *rx_en = 0; | |
6861 | *tx_en = 0; | |
6862 | } | |
6863 | } | |
6864 | ||
09ea401e PL |
6865 | static int hclge_set_pauseparam(struct hnae3_handle *handle, u32 auto_neg, |
6866 | u32 rx_en, u32 tx_en) | |
6867 | { | |
6868 | struct hclge_vport *vport = hclge_get_vport(handle); | |
6869 | struct hclge_dev *hdev = vport->back; | |
6870 | struct phy_device *phydev = hdev->hw.mac.phydev; | |
6871 | u32 fc_autoneg; | |
6872 | ||
09ea401e PL |
6873 | fc_autoneg = hclge_get_autoneg(handle); |
6874 | if (auto_neg != fc_autoneg) { | |
6875 | dev_info(&hdev->pdev->dev, | |
6876 | "To change autoneg please use: ethtool -s <dev> autoneg <on|off>\n"); | |
6877 | return -EOPNOTSUPP; | |
6878 | } | |
6879 | ||
6880 | if (hdev->tm_info.fc_mode == HCLGE_FC_PFC) { | |
6881 | dev_info(&hdev->pdev->dev, | |
6882 | "Priority flow control enabled. Cannot set link flow control.\n"); | |
6883 | return -EOPNOTSUPP; | |
6884 | } | |
6885 | ||
6886 | hclge_set_flowctrl_adv(hdev, rx_en, tx_en); | |
6887 | ||
6888 | if (!fc_autoneg) | |
6889 | return hclge_cfg_pauseparam(hdev, rx_en, tx_en); | |
6890 | ||
bef24782 FL |
6891 | /* Only support flow control negotiation for netdev with |
6892 | * phy attached for now. | |
6893 | */ | |
6894 | if (!phydev) | |
6895 | return -EOPNOTSUPP; | |
6896 | ||
09ea401e PL |
6897 | return phy_start_aneg(phydev); |
6898 | } | |
6899 | ||
46a3df9f S |
6900 | static void hclge_get_ksettings_an_result(struct hnae3_handle *handle, |
6901 | u8 *auto_neg, u32 *speed, u8 *duplex) | |
6902 | { | |
6903 | struct hclge_vport *vport = hclge_get_vport(handle); | |
6904 | struct hclge_dev *hdev = vport->back; | |
6905 | ||
6906 | if (speed) | |
6907 | *speed = hdev->hw.mac.speed; | |
6908 | if (duplex) | |
6909 | *duplex = hdev->hw.mac.duplex; | |
6910 | if (auto_neg) | |
6911 | *auto_neg = hdev->hw.mac.autoneg; | |
6912 | } | |
6913 | ||
6914 | static void hclge_get_media_type(struct hnae3_handle *handle, u8 *media_type) | |
6915 | { | |
6916 | struct hclge_vport *vport = hclge_get_vport(handle); | |
6917 | struct hclge_dev *hdev = vport->back; | |
6918 | ||
6919 | if (media_type) | |
6920 | *media_type = hdev->hw.mac.media_type; | |
6921 | } | |
6922 | ||
6923 | static void hclge_get_mdix_mode(struct hnae3_handle *handle, | |
6924 | u8 *tp_mdix_ctrl, u8 *tp_mdix) | |
6925 | { | |
6926 | struct hclge_vport *vport = hclge_get_vport(handle); | |
6927 | struct hclge_dev *hdev = vport->back; | |
6928 | struct phy_device *phydev = hdev->hw.mac.phydev; | |
6929 | int mdix_ctrl, mdix, retval, is_resolved; | |
6930 | ||
6931 | if (!phydev) { | |
6932 | *tp_mdix_ctrl = ETH_TP_MDI_INVALID; | |
6933 | *tp_mdix = ETH_TP_MDI_INVALID; | |
6934 | return; | |
6935 | } | |
6936 | ||
6937 | phy_write(phydev, HCLGE_PHY_PAGE_REG, HCLGE_PHY_PAGE_MDIX); | |
6938 | ||
6939 | retval = phy_read(phydev, HCLGE_PHY_CSC_REG); | |
ccc23ef3 PL |
6940 | mdix_ctrl = hnae3_get_field(retval, HCLGE_PHY_MDIX_CTRL_M, |
6941 | HCLGE_PHY_MDIX_CTRL_S); | |
46a3df9f S |
6942 | |
6943 | retval = phy_read(phydev, HCLGE_PHY_CSS_REG); | |
ccc23ef3 PL |
6944 | mdix = hnae3_get_bit(retval, HCLGE_PHY_MDIX_STATUS_B); |
6945 | is_resolved = hnae3_get_bit(retval, HCLGE_PHY_SPEED_DUP_RESOLVE_B); | |
46a3df9f S |
6946 | |
6947 | phy_write(phydev, HCLGE_PHY_PAGE_REG, HCLGE_PHY_PAGE_COPPER); | |
6948 | ||
6949 | switch (mdix_ctrl) { | |
6950 | case 0x0: | |
6951 | *tp_mdix_ctrl = ETH_TP_MDI; | |
6952 | break; | |
6953 | case 0x1: | |
6954 | *tp_mdix_ctrl = ETH_TP_MDI_X; | |
6955 | break; | |
6956 | case 0x3: | |
6957 | *tp_mdix_ctrl = ETH_TP_MDI_AUTO; | |
6958 | break; | |
6959 | default: | |
6960 | *tp_mdix_ctrl = ETH_TP_MDI_INVALID; | |
6961 | break; | |
6962 | } | |
6963 | ||
6964 | if (!is_resolved) | |
6965 | *tp_mdix = ETH_TP_MDI_INVALID; | |
6966 | else if (mdix) | |
6967 | *tp_mdix = ETH_TP_MDI_X; | |
6968 | else | |
6969 | *tp_mdix = ETH_TP_MDI; | |
6970 | } | |
6971 | ||
dda6b7d5 FL |
6972 | static int hclge_init_instance_hw(struct hclge_dev *hdev) |
6973 | { | |
6974 | return hclge_mac_connect_phy(hdev); | |
6975 | } | |
6976 | ||
6977 | static void hclge_uninit_instance_hw(struct hclge_dev *hdev) | |
6978 | { | |
6979 | hclge_mac_disconnect_phy(hdev); | |
6980 | } | |
6981 | ||
46a3df9f S |
6982 | static int hclge_init_client_instance(struct hnae3_client *client, |
6983 | struct hnae3_ae_dev *ae_dev) | |
6984 | { | |
6985 | struct hclge_dev *hdev = ae_dev->priv; | |
6986 | struct hclge_vport *vport; | |
6987 | int i, ret; | |
6988 | ||
6989 | for (i = 0; i < hdev->num_vmdq_vport + 1; i++) { | |
6990 | vport = &hdev->vport[i]; | |
6991 | ||
6992 | switch (client->type) { | |
6993 | case HNAE3_CLIENT_KNIC: | |
6994 | ||
6995 | hdev->nic_client = client; | |
6996 | vport->nic.client = client; | |
6997 | ret = client->ops->init_instance(&vport->nic); | |
6998 | if (ret) | |
2f59de78 | 6999 | goto clear_nic; |
46a3df9f | 7000 | |
dda6b7d5 FL |
7001 | ret = hclge_init_instance_hw(hdev); |
7002 | if (ret) { | |
7003 | client->ops->uninit_instance(&vport->nic, | |
7004 | 0); | |
2f59de78 | 7005 | goto clear_nic; |
dda6b7d5 FL |
7006 | } |
7007 | ||
8ed41eeb JS |
7008 | hnae3_set_client_init_flag(client, ae_dev, 1); |
7009 | ||
46a3df9f | 7010 | if (hdev->roce_client && |
e92a0843 | 7011 | hnae3_dev_roce_supported(hdev)) { |
46a3df9f S |
7012 | struct hnae3_client *rc = hdev->roce_client; |
7013 | ||
7014 | ret = hclge_init_roce_base_info(vport); | |
7015 | if (ret) | |
2f59de78 | 7016 | goto clear_roce; |
46a3df9f S |
7017 | |
7018 | ret = rc->ops->init_instance(&vport->roce); | |
7019 | if (ret) | |
2f59de78 | 7020 | goto clear_roce; |
8ed41eeb JS |
7021 | |
7022 | hnae3_set_client_init_flag(hdev->roce_client, | |
7023 | ae_dev, 1); | |
46a3df9f S |
7024 | } |
7025 | ||
7026 | break; | |
7027 | case HNAE3_CLIENT_UNIC: | |
7028 | hdev->nic_client = client; | |
7029 | vport->nic.client = client; | |
7030 | ||
7031 | ret = client->ops->init_instance(&vport->nic); | |
7032 | if (ret) | |
2f59de78 | 7033 | goto clear_nic; |
46a3df9f | 7034 | |
8ed41eeb JS |
7035 | hnae3_set_client_init_flag(client, ae_dev, 1); |
7036 | ||
46a3df9f S |
7037 | break; |
7038 | case HNAE3_CLIENT_ROCE: | |
e92a0843 | 7039 | if (hnae3_dev_roce_supported(hdev)) { |
46a3df9f S |
7040 | hdev->roce_client = client; |
7041 | vport->roce.client = client; | |
7042 | } | |
7043 | ||
3a46f34d | 7044 | if (hdev->roce_client && hdev->nic_client) { |
46a3df9f S |
7045 | ret = hclge_init_roce_base_info(vport); |
7046 | if (ret) | |
2f59de78 | 7047 | goto clear_roce; |
46a3df9f S |
7048 | |
7049 | ret = client->ops->init_instance(&vport->roce); | |
7050 | if (ret) | |
2f59de78 | 7051 | goto clear_roce; |
8ed41eeb JS |
7052 | |
7053 | hnae3_set_client_init_flag(client, ae_dev, 1); | |
46a3df9f | 7054 | } |
085920ba JS |
7055 | |
7056 | break; | |
7057 | default: | |
7058 | return -EINVAL; | |
46a3df9f S |
7059 | } |
7060 | } | |
7061 | ||
7062 | return 0; | |
2f59de78 JS |
7063 | |
7064 | clear_nic: | |
7065 | hdev->nic_client = NULL; | |
7066 | vport->nic.client = NULL; | |
7067 | return ret; | |
7068 | clear_roce: | |
7069 | hdev->roce_client = NULL; | |
7070 | vport->roce.client = NULL; | |
7071 | return ret; | |
46a3df9f S |
7072 | } |
7073 | ||
7074 | static void hclge_uninit_client_instance(struct hnae3_client *client, | |
7075 | struct hnae3_ae_dev *ae_dev) | |
7076 | { | |
7077 | struct hclge_dev *hdev = ae_dev->priv; | |
7078 | struct hclge_vport *vport; | |
7079 | int i; | |
7080 | ||
7081 | for (i = 0; i < hdev->num_vmdq_vport + 1; i++) { | |
7082 | vport = &hdev->vport[i]; | |
a17dcf3f | 7083 | if (hdev->roce_client) { |
46a3df9f S |
7084 | hdev->roce_client->ops->uninit_instance(&vport->roce, |
7085 | 0); | |
a17dcf3f L |
7086 | hdev->roce_client = NULL; |
7087 | vport->roce.client = NULL; | |
7088 | } | |
46a3df9f S |
7089 | if (client->type == HNAE3_CLIENT_ROCE) |
7090 | return; | |
2f59de78 | 7091 | if (hdev->nic_client && client->ops->uninit_instance) { |
dda6b7d5 | 7092 | hclge_uninit_instance_hw(hdev); |
46a3df9f | 7093 | client->ops->uninit_instance(&vport->nic, 0); |
a17dcf3f L |
7094 | hdev->nic_client = NULL; |
7095 | vport->nic.client = NULL; | |
7096 | } | |
46a3df9f S |
7097 | } |
7098 | } | |
7099 | ||
7100 | static int hclge_pci_init(struct hclge_dev *hdev) | |
7101 | { | |
7102 | struct pci_dev *pdev = hdev->pdev; | |
7103 | struct hclge_hw *hw; | |
7104 | int ret; | |
7105 | ||
7106 | ret = pci_enable_device(pdev); | |
7107 | if (ret) { | |
7108 | dev_err(&pdev->dev, "failed to enable PCI device\n"); | |
6c46284e | 7109 | return ret; |
46a3df9f S |
7110 | } |
7111 | ||
7112 | ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); | |
7113 | if (ret) { | |
7114 | ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)); | |
7115 | if (ret) { | |
7116 | dev_err(&pdev->dev, | |
7117 | "can't set consistent PCI DMA"); | |
7118 | goto err_disable_device; | |
7119 | } | |
7120 | dev_warn(&pdev->dev, "set DMA mask to 32 bits\n"); | |
7121 | } | |
7122 | ||
7123 | ret = pci_request_regions(pdev, HCLGE_DRIVER_NAME); | |
7124 | if (ret) { | |
7125 | dev_err(&pdev->dev, "PCI request regions failed %d\n", ret); | |
7126 | goto err_disable_device; | |
7127 | } | |
7128 | ||
7129 | pci_set_master(pdev); | |
7130 | hw = &hdev->hw; | |
46a3df9f S |
7131 | hw->io_base = pcim_iomap(pdev, 2, 0); |
7132 | if (!hw->io_base) { | |
7133 | dev_err(&pdev->dev, "Can't map configuration register space\n"); | |
7134 | ret = -ENOMEM; | |
7135 | goto err_clr_master; | |
7136 | } | |
7137 | ||
709eb41a L |
7138 | hdev->num_req_vfs = pci_sriov_get_totalvfs(pdev); |
7139 | ||
46a3df9f S |
7140 | return 0; |
7141 | err_clr_master: | |
7142 | pci_clear_master(pdev); | |
7143 | pci_release_regions(pdev); | |
7144 | err_disable_device: | |
7145 | pci_disable_device(pdev); | |
46a3df9f S |
7146 | |
7147 | return ret; | |
7148 | } | |
7149 | ||
7150 | static void hclge_pci_uninit(struct hclge_dev *hdev) | |
7151 | { | |
7152 | struct pci_dev *pdev = hdev->pdev; | |
7153 | ||
7d6d639b | 7154 | pcim_iounmap(pdev, hdev->hw.io_base); |
887c3820 | 7155 | pci_free_irq_vectors(pdev); |
46a3df9f S |
7156 | pci_clear_master(pdev); |
7157 | pci_release_mem_regions(pdev); | |
7158 | pci_disable_device(pdev); | |
7159 | } | |
7160 | ||
2ec3d9f0 PL |
7161 | static void hclge_state_init(struct hclge_dev *hdev) |
7162 | { | |
7163 | set_bit(HCLGE_STATE_SERVICE_INITED, &hdev->state); | |
7164 | set_bit(HCLGE_STATE_DOWN, &hdev->state); | |
7165 | clear_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state); | |
7166 | clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state); | |
7167 | clear_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state); | |
7168 | clear_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state); | |
7169 | } | |
7170 | ||
7171 | static void hclge_state_uninit(struct hclge_dev *hdev) | |
7172 | { | |
7173 | set_bit(HCLGE_STATE_DOWN, &hdev->state); | |
7174 | ||
7175 | if (hdev->service_timer.function) | |
7176 | del_timer_sync(&hdev->service_timer); | |
1afdb53a HT |
7177 | if (hdev->reset_timer.function) |
7178 | del_timer_sync(&hdev->reset_timer); | |
2ec3d9f0 PL |
7179 | if (hdev->service_task.func) |
7180 | cancel_work_sync(&hdev->service_task); | |
7181 | if (hdev->rst_service_task.func) | |
7182 | cancel_work_sync(&hdev->rst_service_task); | |
7183 | if (hdev->mbx_service_task.func) | |
7184 | cancel_work_sync(&hdev->mbx_service_task); | |
7185 | } | |
7186 | ||
26977990 HT |
7187 | static void hclge_flr_prepare(struct hnae3_ae_dev *ae_dev) |
7188 | { | |
7189 | #define HCLGE_FLR_WAIT_MS 100 | |
7190 | #define HCLGE_FLR_WAIT_CNT 50 | |
7191 | struct hclge_dev *hdev = ae_dev->priv; | |
7192 | int cnt = 0; | |
7193 | ||
7194 | clear_bit(HNAE3_FLR_DOWN, &hdev->flr_state); | |
7195 | clear_bit(HNAE3_FLR_DONE, &hdev->flr_state); | |
7196 | set_bit(HNAE3_FLR_RESET, &hdev->default_reset_request); | |
7197 | hclge_reset_event(hdev->pdev, NULL); | |
7198 | ||
7199 | while (!test_bit(HNAE3_FLR_DOWN, &hdev->flr_state) && | |
7200 | cnt++ < HCLGE_FLR_WAIT_CNT) | |
7201 | msleep(HCLGE_FLR_WAIT_MS); | |
7202 | ||
7203 | if (!test_bit(HNAE3_FLR_DOWN, &hdev->flr_state)) | |
7204 | dev_err(&hdev->pdev->dev, | |
7205 | "flr wait down timeout: %d\n", cnt); | |
7206 | } | |
7207 | ||
7208 | static void hclge_flr_done(struct hnae3_ae_dev *ae_dev) | |
7209 | { | |
7210 | struct hclge_dev *hdev = ae_dev->priv; | |
7211 | ||
7212 | set_bit(HNAE3_FLR_DONE, &hdev->flr_state); | |
7213 | } | |
7214 | ||
46a3df9f S |
7215 | static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev) |
7216 | { | |
7217 | struct pci_dev *pdev = ae_dev->pdev; | |
46a3df9f S |
7218 | struct hclge_dev *hdev; |
7219 | int ret; | |
7220 | ||
7221 | hdev = devm_kzalloc(&pdev->dev, sizeof(*hdev), GFP_KERNEL); | |
7222 | if (!hdev) { | |
7223 | ret = -ENOMEM; | |
e0027501 | 7224 | goto out; |
46a3df9f S |
7225 | } |
7226 | ||
46a3df9f S |
7227 | hdev->pdev = pdev; |
7228 | hdev->ae_dev = ae_dev; | |
4ed340ab | 7229 | hdev->reset_type = HNAE3_NONE_RESET; |
1a2f7bf2 | 7230 | hdev->reset_level = HNAE3_FUNC_RESET; |
46a3df9f | 7231 | ae_dev->priv = hdev; |
4ee09281 | 7232 | hdev->mps = ETH_FRAME_LEN + ETH_FCS_LEN + 2 * VLAN_HLEN; |
46a3df9f | 7233 | |
b2c04029 YL |
7234 | mutex_init(&hdev->vport_lock); |
7235 | ||
46a3df9f S |
7236 | ret = hclge_pci_init(hdev); |
7237 | if (ret) { | |
7238 | dev_err(&pdev->dev, "PCI init failed\n"); | |
e0027501 | 7239 | goto out; |
46a3df9f S |
7240 | } |
7241 | ||
3efb960f L |
7242 | /* Firmware command queue initialize */ |
7243 | ret = hclge_cmd_queue_init(hdev); | |
7244 | if (ret) { | |
7245 | dev_err(&pdev->dev, "Cmd queue init failed, ret = %d.\n", ret); | |
e0027501 | 7246 | goto err_pci_uninit; |
3efb960f L |
7247 | } |
7248 | ||
7249 | /* Firmware command initialize */ | |
46a3df9f S |
7250 | ret = hclge_cmd_init(hdev); |
7251 | if (ret) | |
e0027501 | 7252 | goto err_cmd_uninit; |
46a3df9f S |
7253 | |
7254 | ret = hclge_get_cap(hdev); | |
7255 | if (ret) { | |
e00e2197 CIK |
7256 | dev_err(&pdev->dev, "get hw capability error, ret = %d.\n", |
7257 | ret); | |
e0027501 | 7258 | goto err_cmd_uninit; |
46a3df9f S |
7259 | } |
7260 | ||
7261 | ret = hclge_configure(hdev); | |
7262 | if (ret) { | |
7263 | dev_err(&pdev->dev, "Configure dev error, ret = %d.\n", ret); | |
e0027501 | 7264 | goto err_cmd_uninit; |
46a3df9f S |
7265 | } |
7266 | ||
887c3820 | 7267 | ret = hclge_init_msi(hdev); |
46a3df9f | 7268 | if (ret) { |
887c3820 | 7269 | dev_err(&pdev->dev, "Init MSI/MSI-X error, ret = %d.\n", ret); |
e0027501 | 7270 | goto err_cmd_uninit; |
46a3df9f S |
7271 | } |
7272 | ||
466b0c00 L |
7273 | ret = hclge_misc_irq_init(hdev); |
7274 | if (ret) { | |
7275 | dev_err(&pdev->dev, | |
7276 | "Misc IRQ(vector0) init error, ret = %d.\n", | |
7277 | ret); | |
e0027501 | 7278 | goto err_msi_uninit; |
466b0c00 L |
7279 | } |
7280 | ||
46a3df9f S |
7281 | ret = hclge_alloc_tqps(hdev); |
7282 | if (ret) { | |
7283 | dev_err(&pdev->dev, "Allocate TQPs error, ret = %d.\n", ret); | |
e0027501 | 7284 | goto err_msi_irq_uninit; |
46a3df9f S |
7285 | } |
7286 | ||
7287 | ret = hclge_alloc_vport(hdev); | |
7288 | if (ret) { | |
7289 | dev_err(&pdev->dev, "Allocate vport error, ret = %d.\n", ret); | |
e0027501 | 7290 | goto err_msi_irq_uninit; |
46a3df9f S |
7291 | } |
7292 | ||
7df7dad6 L |
7293 | ret = hclge_map_tqp(hdev); |
7294 | if (ret) { | |
7295 | dev_err(&pdev->dev, "Map tqp error, ret = %d.\n", ret); | |
bc59f827 | 7296 | goto err_msi_irq_uninit; |
7df7dad6 L |
7297 | } |
7298 | ||
dea9a821 HT |
7299 | if (hdev->hw.mac.media_type == HNAE3_MEDIA_TYPE_COPPER) { |
7300 | ret = hclge_mac_mdio_config(hdev); | |
7301 | if (ret) { | |
7302 | dev_err(&hdev->pdev->dev, | |
7303 | "mdio config fail ret=%d\n", ret); | |
bc59f827 | 7304 | goto err_msi_irq_uninit; |
dea9a821 | 7305 | } |
cf9cca2d | 7306 | } |
7307 | ||
2da5ec58 JS |
7308 | ret = hclge_init_umv_space(hdev); |
7309 | if (ret) { | |
7310 | dev_err(&pdev->dev, "umv space init error, ret=%d.\n", ret); | |
7311 | goto err_msi_irq_uninit; | |
7312 | } | |
7313 | ||
46a3df9f S |
7314 | ret = hclge_mac_init(hdev); |
7315 | if (ret) { | |
7316 | dev_err(&pdev->dev, "Mac init error, ret = %d\n", ret); | |
e0027501 | 7317 | goto err_mdiobus_unreg; |
46a3df9f | 7318 | } |
46a3df9f S |
7319 | |
7320 | ret = hclge_config_tso(hdev, HCLGE_TSO_MSS_MIN, HCLGE_TSO_MSS_MAX); | |
7321 | if (ret) { | |
7322 | dev_err(&pdev->dev, "Enable tso fail, ret =%d\n", ret); | |
e0027501 | 7323 | goto err_mdiobus_unreg; |
46a3df9f S |
7324 | } |
7325 | ||
73f88b00 PL |
7326 | ret = hclge_config_gro(hdev, true); |
7327 | if (ret) | |
7328 | goto err_mdiobus_unreg; | |
7329 | ||
46a3df9f S |
7330 | ret = hclge_init_vlan_config(hdev); |
7331 | if (ret) { | |
7332 | dev_err(&pdev->dev, "VLAN init fail, ret =%d\n", ret); | |
e0027501 | 7333 | goto err_mdiobus_unreg; |
46a3df9f S |
7334 | } |
7335 | ||
7336 | ret = hclge_tm_schd_init(hdev); | |
7337 | if (ret) { | |
7338 | dev_err(&pdev->dev, "tm schd init fail, ret =%d\n", ret); | |
e0027501 | 7339 | goto err_mdiobus_unreg; |
68ece54e YL |
7340 | } |
7341 | ||
8015bb74 | 7342 | hclge_rss_init_cfg(hdev); |
68ece54e YL |
7343 | ret = hclge_rss_init_hw(hdev); |
7344 | if (ret) { | |
7345 | dev_err(&pdev->dev, "Rss init fail, ret =%d\n", ret); | |
e0027501 | 7346 | goto err_mdiobus_unreg; |
46a3df9f S |
7347 | } |
7348 | ||
635bfb58 FL |
7349 | ret = init_mgr_tbl(hdev); |
7350 | if (ret) { | |
7351 | dev_err(&pdev->dev, "manager table init fail, ret =%d\n", ret); | |
e0027501 | 7352 | goto err_mdiobus_unreg; |
635bfb58 FL |
7353 | } |
7354 | ||
10a954bc JS |
7355 | ret = hclge_init_fd_config(hdev); |
7356 | if (ret) { | |
7357 | dev_err(&pdev->dev, | |
7358 | "fd table init fail, ret=%d\n", ret); | |
7359 | goto err_mdiobus_unreg; | |
7360 | } | |
7361 | ||
9f53588e SJ |
7362 | ret = hclge_hw_error_set_state(hdev, true); |
7363 | if (ret) { | |
7364 | dev_err(&pdev->dev, | |
9ee5dbbb | 7365 | "fail(%d) to enable hw error interrupts\n", ret); |
9f53588e SJ |
7366 | goto err_mdiobus_unreg; |
7367 | } | |
7368 | ||
cacde272 YL |
7369 | hclge_dcb_ops_set(hdev); |
7370 | ||
d039ef68 | 7371 | timer_setup(&hdev->service_timer, hclge_service_timer, 0); |
1afdb53a | 7372 | timer_setup(&hdev->reset_timer, hclge_reset_timer, 0); |
46a3df9f | 7373 | INIT_WORK(&hdev->service_task, hclge_service_task); |
ed4a1bb8 | 7374 | INIT_WORK(&hdev->rst_service_task, hclge_reset_service_task); |
22fd3468 | 7375 | INIT_WORK(&hdev->mbx_service_task, hclge_mailbox_service_task); |
46a3df9f | 7376 | |
9ab4ad14 XW |
7377 | hclge_clear_all_event_cause(hdev); |
7378 | ||
466b0c00 L |
7379 | /* Enable MISC vector(vector0) */ |
7380 | hclge_enable_vector(&hdev->misc_vector, true); | |
7381 | ||
2ec3d9f0 | 7382 | hclge_state_init(hdev); |
1a2f7bf2 | 7383 | hdev->last_reset_time = jiffies; |
46a3df9f S |
7384 | |
7385 | pr_info("%s driver initialization finished.\n", HCLGE_DRIVER_NAME); | |
7386 | return 0; | |
7387 | ||
e0027501 HT |
7388 | err_mdiobus_unreg: |
7389 | if (hdev->hw.mac.phydev) | |
7390 | mdiobus_unregister(hdev->hw.mac.mdio_bus); | |
e0027501 HT |
7391 | err_msi_irq_uninit: |
7392 | hclge_misc_irq_uninit(hdev); | |
7393 | err_msi_uninit: | |
7394 | pci_free_irq_vectors(pdev); | |
7395 | err_cmd_uninit: | |
7396 | hclge_destroy_cmd_queue(&hdev->hw); | |
7397 | err_pci_uninit: | |
7d6d639b | 7398 | pcim_iounmap(pdev, hdev->hw.io_base); |
e0027501 | 7399 | pci_clear_master(pdev); |
46a3df9f | 7400 | pci_release_regions(pdev); |
e0027501 | 7401 | pci_disable_device(pdev); |
e0027501 | 7402 | out: |
46a3df9f S |
7403 | return ret; |
7404 | } | |
7405 | ||
c6dc5213 | 7406 | static void hclge_stats_clear(struct hclge_dev *hdev) |
7407 | { | |
7408 | memset(&hdev->hw_stats, 0, sizeof(hdev->hw_stats)); | |
7409 | } | |
7410 | ||
337460de YL |
7411 | static void hclge_reset_vport_state(struct hclge_dev *hdev) |
7412 | { | |
7413 | struct hclge_vport *vport = hdev->vport; | |
7414 | int i; | |
7415 | ||
7416 | for (i = 0; i < hdev->num_alloc_vport; i++) { | |
7417 | hclge_vport_start(vport); | |
7418 | vport++; | |
7419 | } | |
7420 | } | |
7421 | ||
4ed340ab L |
7422 | static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev) |
7423 | { | |
7424 | struct hclge_dev *hdev = ae_dev->priv; | |
7425 | struct pci_dev *pdev = ae_dev->pdev; | |
7426 | int ret; | |
7427 | ||
7428 | set_bit(HCLGE_STATE_DOWN, &hdev->state); | |
7429 | ||
c6dc5213 | 7430 | hclge_stats_clear(hdev); |
4e66632d | 7431 | memset(hdev->vlan_table, 0, sizeof(hdev->vlan_table)); |
c6dc5213 | 7432 | |
4ed340ab L |
7433 | ret = hclge_cmd_init(hdev); |
7434 | if (ret) { | |
7435 | dev_err(&pdev->dev, "Cmd queue init failed\n"); | |
7436 | return ret; | |
7437 | } | |
4ed340ab L |
7438 | |
7439 | ret = hclge_map_tqp(hdev); | |
7440 | if (ret) { | |
7441 | dev_err(&pdev->dev, "Map tqp error, ret = %d.\n", ret); | |
7442 | return ret; | |
7443 | } | |
7444 | ||
2da5ec58 JS |
7445 | hclge_reset_umv_space(hdev); |
7446 | ||
4ed340ab L |
7447 | ret = hclge_mac_init(hdev); |
7448 | if (ret) { | |
7449 | dev_err(&pdev->dev, "Mac init error, ret = %d\n", ret); | |
7450 | return ret; | |
7451 | } | |
7452 | ||
4ed340ab L |
7453 | ret = hclge_config_tso(hdev, HCLGE_TSO_MSS_MIN, HCLGE_TSO_MSS_MAX); |
7454 | if (ret) { | |
7455 | dev_err(&pdev->dev, "Enable tso fail, ret =%d\n", ret); | |
7456 | return ret; | |
7457 | } | |
7458 | ||
73f88b00 PL |
7459 | ret = hclge_config_gro(hdev, true); |
7460 | if (ret) | |
7461 | return ret; | |
7462 | ||
4ed340ab L |
7463 | ret = hclge_init_vlan_config(hdev); |
7464 | if (ret) { | |
7465 | dev_err(&pdev->dev, "VLAN init fail, ret =%d\n", ret); | |
7466 | return ret; | |
7467 | } | |
7468 | ||
d85f1ab5 | 7469 | ret = hclge_tm_init_hw(hdev); |
4ed340ab | 7470 | if (ret) { |
d85f1ab5 | 7471 | dev_err(&pdev->dev, "tm init hw fail, ret =%d\n", ret); |
4ed340ab L |
7472 | return ret; |
7473 | } | |
7474 | ||
7475 | ret = hclge_rss_init_hw(hdev); | |
7476 | if (ret) { | |
7477 | dev_err(&pdev->dev, "Rss init fail, ret =%d\n", ret); | |
7478 | return ret; | |
7479 | } | |
7480 | ||
10a954bc JS |
7481 | ret = hclge_init_fd_config(hdev); |
7482 | if (ret) { | |
7483 | dev_err(&pdev->dev, | |
7484 | "fd table init fail, ret=%d\n", ret); | |
7485 | return ret; | |
7486 | } | |
7487 | ||
9ee5dbbb SJ |
7488 | /* Re-enable the hw error interrupts because |
7489 | * the interrupts get disabled on core/global reset. | |
78807a3d | 7490 | */ |
9ee5dbbb SJ |
7491 | ret = hclge_hw_error_set_state(hdev, true); |
7492 | if (ret) { | |
7493 | dev_err(&pdev->dev, | |
7494 | "fail(%d) to re-enable HNS hw error interrupts\n", ret); | |
7495 | return ret; | |
7496 | } | |
78807a3d | 7497 | |
337460de YL |
7498 | hclge_reset_vport_state(hdev); |
7499 | ||
4ed340ab L |
7500 | dev_info(&pdev->dev, "Reset done, %s driver initialization finished.\n", |
7501 | HCLGE_DRIVER_NAME); | |
7502 | ||
7503 | return 0; | |
7504 | } | |
7505 | ||
46a3df9f S |
7506 | static void hclge_uninit_ae_dev(struct hnae3_ae_dev *ae_dev) |
7507 | { | |
7508 | struct hclge_dev *hdev = ae_dev->priv; | |
7509 | struct hclge_mac *mac = &hdev->hw.mac; | |
7510 | ||
2ec3d9f0 | 7511 | hclge_state_uninit(hdev); |
46a3df9f S |
7512 | |
7513 | if (mac->phydev) | |
7514 | mdiobus_unregister(mac->mdio_bus); | |
7515 | ||
2da5ec58 JS |
7516 | hclge_uninit_umv_space(hdev); |
7517 | ||
466b0c00 L |
7518 | /* Disable MISC vector(vector0) */ |
7519 | hclge_enable_vector(&hdev->misc_vector, false); | |
9ab4ad14 XW |
7520 | synchronize_irq(hdev->misc_vector.vector_irq); |
7521 | ||
9f53588e | 7522 | hclge_hw_error_set_state(hdev, false); |
46a3df9f | 7523 | hclge_destroy_cmd_queue(&hdev->hw); |
202f2014 | 7524 | hclge_misc_irq_uninit(hdev); |
46a3df9f | 7525 | hclge_pci_uninit(hdev); |
b2c04029 | 7526 | mutex_destroy(&hdev->vport_lock); |
46a3df9f S |
7527 | ae_dev->priv = NULL; |
7528 | } | |
7529 | ||
4f645a90 PL |
7530 | static u32 hclge_get_max_channels(struct hnae3_handle *handle) |
7531 | { | |
7532 | struct hnae3_knic_private_info *kinfo = &handle->kinfo; | |
7533 | struct hclge_vport *vport = hclge_get_vport(handle); | |
7534 | struct hclge_dev *hdev = vport->back; | |
7535 | ||
7536 | return min_t(u32, hdev->rss_size_max * kinfo->num_tc, hdev->num_tqps); | |
7537 | } | |
7538 | ||
7539 | static void hclge_get_channels(struct hnae3_handle *handle, | |
7540 | struct ethtool_channels *ch) | |
7541 | { | |
7542 | struct hclge_vport *vport = hclge_get_vport(handle); | |
7543 | ||
7544 | ch->max_combined = hclge_get_max_channels(handle); | |
7545 | ch->other_count = 1; | |
7546 | ch->max_other = 1; | |
7547 | ch->combined_count = vport->alloc_tqps; | |
7548 | } | |
7549 | ||
f1f779ce | 7550 | static void hclge_get_tqps_and_rss_info(struct hnae3_handle *handle, |
08ca3d58 | 7551 | u16 *alloc_tqps, u16 *max_rss_size) |
f1f779ce PL |
7552 | { |
7553 | struct hclge_vport *vport = hclge_get_vport(handle); | |
7554 | struct hclge_dev *hdev = vport->back; | |
f1f779ce | 7555 | |
08ca3d58 | 7556 | *alloc_tqps = vport->alloc_tqps; |
f1f779ce PL |
7557 | *max_rss_size = hdev->rss_size_max; |
7558 | } | |
7559 | ||
7560 | static void hclge_release_tqp(struct hclge_vport *vport) | |
7561 | { | |
7562 | struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo; | |
7563 | struct hclge_dev *hdev = vport->back; | |
7564 | int i; | |
7565 | ||
7566 | for (i = 0; i < kinfo->num_tqps; i++) { | |
7567 | struct hclge_tqp *tqp = | |
7568 | container_of(kinfo->tqp[i], struct hclge_tqp, q); | |
7569 | ||
7570 | tqp->q.handle = NULL; | |
7571 | tqp->q.tqp_index = 0; | |
7572 | tqp->alloced = false; | |
7573 | } | |
7574 | ||
7575 | devm_kfree(&hdev->pdev->dev, kinfo->tqp); | |
7576 | kinfo->tqp = NULL; | |
7577 | } | |
7578 | ||
7579 | static int hclge_set_channels(struct hnae3_handle *handle, u32 new_tqps_num) | |
7580 | { | |
7581 | struct hclge_vport *vport = hclge_get_vport(handle); | |
7582 | struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo; | |
7583 | struct hclge_dev *hdev = vport->back; | |
7584 | int cur_rss_size = kinfo->rss_size; | |
7585 | int cur_tqps = kinfo->num_tqps; | |
7586 | u16 tc_offset[HCLGE_MAX_TC_NUM]; | |
7587 | u16 tc_valid[HCLGE_MAX_TC_NUM]; | |
7588 | u16 tc_size[HCLGE_MAX_TC_NUM]; | |
7589 | u16 roundup_size; | |
7590 | u32 *rss_indir; | |
7591 | int ret, i; | |
7592 | ||
f73c9107 | 7593 | /* Free old tqps, and reallocate with new tqp number when nic setup */ |
f1f779ce PL |
7594 | hclge_release_tqp(vport); |
7595 | ||
81356b1f | 7596 | ret = hclge_knic_setup(vport, new_tqps_num, kinfo->num_desc); |
f1f779ce PL |
7597 | if (ret) { |
7598 | dev_err(&hdev->pdev->dev, "setup nic fail, ret =%d\n", ret); | |
7599 | return ret; | |
7600 | } | |
7601 | ||
7602 | ret = hclge_map_tqp_to_vport(hdev, vport); | |
7603 | if (ret) { | |
7604 | dev_err(&hdev->pdev->dev, "map vport tqp fail, ret =%d\n", ret); | |
7605 | return ret; | |
7606 | } | |
7607 | ||
7608 | ret = hclge_tm_schd_init(hdev); | |
7609 | if (ret) { | |
7610 | dev_err(&hdev->pdev->dev, "tm schd init fail, ret =%d\n", ret); | |
7611 | return ret; | |
7612 | } | |
7613 | ||
7614 | roundup_size = roundup_pow_of_two(kinfo->rss_size); | |
7615 | roundup_size = ilog2(roundup_size); | |
7616 | /* Set the RSS TC mode according to the new RSS size */ | |
7617 | for (i = 0; i < HCLGE_MAX_TC_NUM; i++) { | |
7618 | tc_valid[i] = 0; | |
7619 | ||
7620 | if (!(hdev->hw_tc_map & BIT(i))) | |
7621 | continue; | |
7622 | ||
7623 | tc_valid[i] = 1; | |
7624 | tc_size[i] = roundup_size; | |
7625 | tc_offset[i] = kinfo->rss_size * i; | |
7626 | } | |
7627 | ret = hclge_set_rss_tc_mode(hdev, tc_valid, tc_size, tc_offset); | |
7628 | if (ret) | |
7629 | return ret; | |
7630 | ||
7631 | /* Reinitializes the rss indirect table according to the new RSS size */ | |
7632 | rss_indir = kcalloc(HCLGE_RSS_IND_TBL_SIZE, sizeof(u32), GFP_KERNEL); | |
7633 | if (!rss_indir) | |
7634 | return -ENOMEM; | |
7635 | ||
7636 | for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++) | |
7637 | rss_indir[i] = i % kinfo->rss_size; | |
7638 | ||
7639 | ret = hclge_set_rss(handle, rss_indir, NULL, 0); | |
7640 | if (ret) | |
7641 | dev_err(&hdev->pdev->dev, "set rss indir table fail, ret=%d\n", | |
7642 | ret); | |
7643 | ||
7644 | kfree(rss_indir); | |
7645 | ||
7646 | if (!ret) | |
7647 | dev_info(&hdev->pdev->dev, | |
7648 | "Channels changed, rss_size from %d to %d, tqps from %d to %d", | |
7649 | cur_rss_size, kinfo->rss_size, | |
7650 | cur_tqps, kinfo->rss_size * kinfo->num_tc); | |
7651 | ||
7652 | return ret; | |
7653 | } | |
7654 | ||
db2a3e43 FL |
7655 | static int hclge_get_regs_num(struct hclge_dev *hdev, u32 *regs_num_32_bit, |
7656 | u32 *regs_num_64_bit) | |
7657 | { | |
7658 | struct hclge_desc desc; | |
7659 | u32 total_num; | |
7660 | int ret; | |
7661 | ||
7662 | hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_REG_NUM, true); | |
7663 | ret = hclge_cmd_send(&hdev->hw, &desc, 1); | |
7664 | if (ret) { | |
7665 | dev_err(&hdev->pdev->dev, | |
7666 | "Query register number cmd failed, ret = %d.\n", ret); | |
7667 | return ret; | |
7668 | } | |
7669 | ||
7670 | *regs_num_32_bit = le32_to_cpu(desc.data[0]); | |
7671 | *regs_num_64_bit = le32_to_cpu(desc.data[1]); | |
7672 | ||
7673 | total_num = *regs_num_32_bit + *regs_num_64_bit; | |
7674 | if (!total_num) | |
7675 | return -EINVAL; | |
7676 | ||
7677 | return 0; | |
7678 | } | |
7679 | ||
7680 | static int hclge_get_32_bit_regs(struct hclge_dev *hdev, u32 regs_num, | |
7681 | void *data) | |
7682 | { | |
7683 | #define HCLGE_32_BIT_REG_RTN_DATANUM 8 | |
7684 | ||
7685 | struct hclge_desc *desc; | |
7686 | u32 *reg_val = data; | |
7687 | __le32 *desc_data; | |
7688 | int cmd_num; | |
7689 | int i, k, n; | |
7690 | int ret; | |
7691 | ||
7692 | if (regs_num == 0) | |
7693 | return 0; | |
7694 | ||
7695 | cmd_num = DIV_ROUND_UP(regs_num + 2, HCLGE_32_BIT_REG_RTN_DATANUM); | |
7696 | desc = kcalloc(cmd_num, sizeof(struct hclge_desc), GFP_KERNEL); | |
7697 | if (!desc) | |
7698 | return -ENOMEM; | |
7699 | ||
7700 | hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_32_BIT_REG, true); | |
7701 | ret = hclge_cmd_send(&hdev->hw, desc, cmd_num); | |
7702 | if (ret) { | |
7703 | dev_err(&hdev->pdev->dev, | |
7704 | "Query 32 bit register cmd failed, ret = %d.\n", ret); | |
7705 | kfree(desc); | |
7706 | return ret; | |
7707 | } | |
7708 | ||
7709 | for (i = 0; i < cmd_num; i++) { | |
7710 | if (i == 0) { | |
7711 | desc_data = (__le32 *)(&desc[i].data[0]); | |
7712 | n = HCLGE_32_BIT_REG_RTN_DATANUM - 2; | |
7713 | } else { | |
7714 | desc_data = (__le32 *)(&desc[i]); | |
7715 | n = HCLGE_32_BIT_REG_RTN_DATANUM; | |
7716 | } | |
7717 | for (k = 0; k < n; k++) { | |
7718 | *reg_val++ = le32_to_cpu(*desc_data++); | |
7719 | ||
7720 | regs_num--; | |
7721 | if (!regs_num) | |
7722 | break; | |
7723 | } | |
7724 | } | |
7725 | ||
7726 | kfree(desc); | |
7727 | return 0; | |
7728 | } | |
7729 | ||
7730 | static int hclge_get_64_bit_regs(struct hclge_dev *hdev, u32 regs_num, | |
7731 | void *data) | |
7732 | { | |
7733 | #define HCLGE_64_BIT_REG_RTN_DATANUM 4 | |
7734 | ||
7735 | struct hclge_desc *desc; | |
7736 | u64 *reg_val = data; | |
7737 | __le64 *desc_data; | |
7738 | int cmd_num; | |
7739 | int i, k, n; | |
7740 | int ret; | |
7741 | ||
7742 | if (regs_num == 0) | |
7743 | return 0; | |
7744 | ||
7745 | cmd_num = DIV_ROUND_UP(regs_num + 1, HCLGE_64_BIT_REG_RTN_DATANUM); | |
7746 | desc = kcalloc(cmd_num, sizeof(struct hclge_desc), GFP_KERNEL); | |
7747 | if (!desc) | |
7748 | return -ENOMEM; | |
7749 | ||
7750 | hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_64_BIT_REG, true); | |
7751 | ret = hclge_cmd_send(&hdev->hw, desc, cmd_num); | |
7752 | if (ret) { | |
7753 | dev_err(&hdev->pdev->dev, | |
7754 | "Query 64 bit register cmd failed, ret = %d.\n", ret); | |
7755 | kfree(desc); | |
7756 | return ret; | |
7757 | } | |
7758 | ||
7759 | for (i = 0; i < cmd_num; i++) { | |
7760 | if (i == 0) { | |
7761 | desc_data = (__le64 *)(&desc[i].data[0]); | |
7762 | n = HCLGE_64_BIT_REG_RTN_DATANUM - 1; | |
7763 | } else { | |
7764 | desc_data = (__le64 *)(&desc[i]); | |
7765 | n = HCLGE_64_BIT_REG_RTN_DATANUM; | |
7766 | } | |
7767 | for (k = 0; k < n; k++) { | |
7768 | *reg_val++ = le64_to_cpu(*desc_data++); | |
7769 | ||
7770 | regs_num--; | |
7771 | if (!regs_num) | |
7772 | break; | |
7773 | } | |
7774 | } | |
7775 | ||
7776 | kfree(desc); | |
7777 | return 0; | |
7778 | } | |
7779 | ||
a1018e31 JS |
7780 | #define MAX_SEPARATE_NUM 4 |
7781 | #define SEPARATOR_VALUE 0xFFFFFFFF | |
7782 | #define REG_NUM_PER_LINE 4 | |
7783 | #define REG_LEN_PER_LINE (REG_NUM_PER_LINE * sizeof(u32)) | |
7784 | ||
db2a3e43 FL |
7785 | static int hclge_get_regs_len(struct hnae3_handle *handle) |
7786 | { | |
a1018e31 JS |
7787 | int cmdq_lines, common_lines, ring_lines, tqp_intr_lines; |
7788 | struct hnae3_knic_private_info *kinfo = &handle->kinfo; | |
db2a3e43 FL |
7789 | struct hclge_vport *vport = hclge_get_vport(handle); |
7790 | struct hclge_dev *hdev = vport->back; | |
7791 | u32 regs_num_32_bit, regs_num_64_bit; | |
7792 | int ret; | |
7793 | ||
7794 | ret = hclge_get_regs_num(hdev, ®s_num_32_bit, ®s_num_64_bit); | |
7795 | if (ret) { | |
7796 | dev_err(&hdev->pdev->dev, | |
7797 | "Get register number failed, ret = %d.\n", ret); | |
7798 | return -EOPNOTSUPP; | |
7799 | } | |
7800 | ||
a1018e31 JS |
7801 | cmdq_lines = sizeof(cmdq_reg_addr_list) / REG_LEN_PER_LINE + 1; |
7802 | common_lines = sizeof(common_reg_addr_list) / REG_LEN_PER_LINE + 1; | |
7803 | ring_lines = sizeof(ring_reg_addr_list) / REG_LEN_PER_LINE + 1; | |
7804 | tqp_intr_lines = sizeof(tqp_intr_reg_addr_list) / REG_LEN_PER_LINE + 1; | |
7805 | ||
7806 | return (cmdq_lines + common_lines + ring_lines * kinfo->num_tqps + | |
7807 | tqp_intr_lines * (hdev->num_msi_used - 1)) * REG_LEN_PER_LINE + | |
7808 | regs_num_32_bit * sizeof(u32) + regs_num_64_bit * sizeof(u64); | |
db2a3e43 FL |
7809 | } |
7810 | ||
7811 | static void hclge_get_regs(struct hnae3_handle *handle, u32 *version, | |
7812 | void *data) | |
7813 | { | |
a1018e31 | 7814 | struct hnae3_knic_private_info *kinfo = &handle->kinfo; |
db2a3e43 FL |
7815 | struct hclge_vport *vport = hclge_get_vport(handle); |
7816 | struct hclge_dev *hdev = vport->back; | |
7817 | u32 regs_num_32_bit, regs_num_64_bit; | |
a1018e31 JS |
7818 | int i, j, reg_um, separator_num; |
7819 | u32 *reg = data; | |
db2a3e43 FL |
7820 | int ret; |
7821 | ||
7822 | *version = hdev->fw_version; | |
7823 | ||
7824 | ret = hclge_get_regs_num(hdev, ®s_num_32_bit, ®s_num_64_bit); | |
7825 | if (ret) { | |
7826 | dev_err(&hdev->pdev->dev, | |
7827 | "Get register number failed, ret = %d.\n", ret); | |
7828 | return; | |
7829 | } | |
7830 | ||
a1018e31 JS |
7831 | /* fetching per-PF registers valus from PF PCIe register space */ |
7832 | reg_um = sizeof(cmdq_reg_addr_list) / sizeof(u32); | |
7833 | separator_num = MAX_SEPARATE_NUM - reg_um % REG_NUM_PER_LINE; | |
7834 | for (i = 0; i < reg_um; i++) | |
7835 | *reg++ = hclge_read_dev(&hdev->hw, cmdq_reg_addr_list[i]); | |
7836 | for (i = 0; i < separator_num; i++) | |
7837 | *reg++ = SEPARATOR_VALUE; | |
7838 | ||
7839 | reg_um = sizeof(common_reg_addr_list) / sizeof(u32); | |
7840 | separator_num = MAX_SEPARATE_NUM - reg_um % REG_NUM_PER_LINE; | |
7841 | for (i = 0; i < reg_um; i++) | |
7842 | *reg++ = hclge_read_dev(&hdev->hw, common_reg_addr_list[i]); | |
7843 | for (i = 0; i < separator_num; i++) | |
7844 | *reg++ = SEPARATOR_VALUE; | |
7845 | ||
7846 | reg_um = sizeof(ring_reg_addr_list) / sizeof(u32); | |
7847 | separator_num = MAX_SEPARATE_NUM - reg_um % REG_NUM_PER_LINE; | |
7848 | for (j = 0; j < kinfo->num_tqps; j++) { | |
7849 | for (i = 0; i < reg_um; i++) | |
7850 | *reg++ = hclge_read_dev(&hdev->hw, | |
7851 | ring_reg_addr_list[i] + | |
7852 | 0x200 * j); | |
7853 | for (i = 0; i < separator_num; i++) | |
7854 | *reg++ = SEPARATOR_VALUE; | |
7855 | } | |
7856 | ||
7857 | reg_um = sizeof(tqp_intr_reg_addr_list) / sizeof(u32); | |
7858 | separator_num = MAX_SEPARATE_NUM - reg_um % REG_NUM_PER_LINE; | |
7859 | for (j = 0; j < hdev->num_msi_used - 1; j++) { | |
7860 | for (i = 0; i < reg_um; i++) | |
7861 | *reg++ = hclge_read_dev(&hdev->hw, | |
7862 | tqp_intr_reg_addr_list[i] + | |
7863 | 4 * j); | |
7864 | for (i = 0; i < separator_num; i++) | |
7865 | *reg++ = SEPARATOR_VALUE; | |
7866 | } | |
7867 | ||
7868 | /* fetching PF common registers values from firmware */ | |
7869 | ret = hclge_get_32_bit_regs(hdev, regs_num_32_bit, reg); | |
db2a3e43 FL |
7870 | if (ret) { |
7871 | dev_err(&hdev->pdev->dev, | |
7872 | "Get 32 bit register failed, ret = %d.\n", ret); | |
7873 | return; | |
7874 | } | |
7875 | ||
a1018e31 JS |
7876 | reg += regs_num_32_bit; |
7877 | ret = hclge_get_64_bit_regs(hdev, regs_num_64_bit, reg); | |
db2a3e43 FL |
7878 | if (ret) |
7879 | dev_err(&hdev->pdev->dev, | |
7880 | "Get 64 bit register failed, ret = %d.\n", ret); | |
7881 | } | |
7882 | ||
fe3a3e15 | 7883 | static int hclge_set_led_status(struct hclge_dev *hdev, u8 locate_led_status) |
d9a0884e JS |
7884 | { |
7885 | struct hclge_set_led_state_cmd *req; | |
7886 | struct hclge_desc desc; | |
7887 | int ret; | |
7888 | ||
7889 | hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_LED_STATUS_CFG, false); | |
7890 | ||
7891 | req = (struct hclge_set_led_state_cmd *)desc.data; | |
ccc23ef3 PL |
7892 | hnae3_set_field(req->locate_led_config, HCLGE_LED_LOCATE_STATE_M, |
7893 | HCLGE_LED_LOCATE_STATE_S, locate_led_status); | |
d9a0884e JS |
7894 | |
7895 | ret = hclge_cmd_send(&hdev->hw, &desc, 1); | |
7896 | if (ret) | |
7897 | dev_err(&hdev->pdev->dev, | |
7898 | "Send set led state cmd error, ret =%d\n", ret); | |
7899 | ||
7900 | return ret; | |
7901 | } | |
7902 | ||
7903 | enum hclge_led_status { | |
7904 | HCLGE_LED_OFF, | |
7905 | HCLGE_LED_ON, | |
7906 | HCLGE_LED_NO_CHANGE = 0xFF, | |
7907 | }; | |
7908 | ||
7909 | static int hclge_set_led_id(struct hnae3_handle *handle, | |
7910 | enum ethtool_phys_id_state status) | |
7911 | { | |
d9a0884e JS |
7912 | struct hclge_vport *vport = hclge_get_vport(handle); |
7913 | struct hclge_dev *hdev = vport->back; | |
d9a0884e JS |
7914 | |
7915 | switch (status) { | |
7916 | case ETHTOOL_ID_ACTIVE: | |
fe3a3e15 | 7917 | return hclge_set_led_status(hdev, HCLGE_LED_ON); |
d9a0884e | 7918 | case ETHTOOL_ID_INACTIVE: |
fe3a3e15 | 7919 | return hclge_set_led_status(hdev, HCLGE_LED_OFF); |
d9a0884e | 7920 | default: |
fe3a3e15 | 7921 | return -EINVAL; |
d9a0884e | 7922 | } |
d9a0884e JS |
7923 | } |
7924 | ||
d92ceae9 FL |
7925 | static void hclge_get_link_mode(struct hnae3_handle *handle, |
7926 | unsigned long *supported, | |
7927 | unsigned long *advertising) | |
7928 | { | |
7929 | unsigned int size = BITS_TO_LONGS(__ETHTOOL_LINK_MODE_MASK_NBITS); | |
7930 | struct hclge_vport *vport = hclge_get_vport(handle); | |
7931 | struct hclge_dev *hdev = vport->back; | |
7932 | unsigned int idx = 0; | |
7933 | ||
7934 | for (; idx < size; idx++) { | |
7935 | supported[idx] = hdev->hw.mac.supported[idx]; | |
7936 | advertising[idx] = hdev->hw.mac.advertising[idx]; | |
7937 | } | |
7938 | } | |
7939 | ||
46a3df9f S |
7940 | static const struct hnae3_ae_ops hclge_ops = { |
7941 | .init_ae_dev = hclge_init_ae_dev, | |
7942 | .uninit_ae_dev = hclge_uninit_ae_dev, | |
26977990 HT |
7943 | .flr_prepare = hclge_flr_prepare, |
7944 | .flr_done = hclge_flr_done, | |
46a3df9f S |
7945 | .init_client_instance = hclge_init_client_instance, |
7946 | .uninit_client_instance = hclge_uninit_client_instance, | |
63d7e66f SM |
7947 | .map_ring_to_vector = hclge_map_ring_to_vector, |
7948 | .unmap_ring_from_vector = hclge_unmap_ring_frm_vector, | |
46a3df9f | 7949 | .get_vector = hclge_get_vector, |
7412200c | 7950 | .put_vector = hclge_put_vector, |
46a3df9f | 7951 | .set_promisc_mode = hclge_set_promisc_mode, |
c39c4d98 | 7952 | .set_loopback = hclge_set_loopback, |
46a3df9f S |
7953 | .start = hclge_ae_start, |
7954 | .stop = hclge_ae_stop, | |
337460de YL |
7955 | .client_start = hclge_client_start, |
7956 | .client_stop = hclge_client_stop, | |
46a3df9f S |
7957 | .get_status = hclge_get_status, |
7958 | .get_ksettings_an_result = hclge_get_ksettings_an_result, | |
7959 | .update_speed_duplex_h = hclge_update_speed_duplex_h, | |
7960 | .cfg_mac_speed_dup_h = hclge_cfg_mac_speed_dup_h, | |
7961 | .get_media_type = hclge_get_media_type, | |
7962 | .get_rss_key_size = hclge_get_rss_key_size, | |
7963 | .get_rss_indir_size = hclge_get_rss_indir_size, | |
7964 | .get_rss = hclge_get_rss, | |
7965 | .set_rss = hclge_set_rss, | |
f7db940a | 7966 | .set_rss_tuple = hclge_set_rss_tuple, |
07d29954 | 7967 | .get_rss_tuple = hclge_get_rss_tuple, |
46a3df9f S |
7968 | .get_tc_size = hclge_get_tc_size, |
7969 | .get_mac_addr = hclge_get_mac_addr, | |
7970 | .set_mac_addr = hclge_set_mac_addr, | |
a185d723 | 7971 | .do_ioctl = hclge_do_ioctl, |
46a3df9f S |
7972 | .add_uc_addr = hclge_add_uc_addr, |
7973 | .rm_uc_addr = hclge_rm_uc_addr, | |
7974 | .add_mc_addr = hclge_add_mc_addr, | |
7975 | .rm_mc_addr = hclge_rm_mc_addr, | |
7976 | .set_autoneg = hclge_set_autoneg, | |
7977 | .get_autoneg = hclge_get_autoneg, | |
7978 | .get_pauseparam = hclge_get_pauseparam, | |
09ea401e | 7979 | .set_pauseparam = hclge_set_pauseparam, |
46a3df9f S |
7980 | .set_mtu = hclge_set_mtu, |
7981 | .reset_queue = hclge_reset_tqp, | |
7982 | .get_stats = hclge_get_stats, | |
7983 | .update_stats = hclge_update_stats, | |
7984 | .get_strings = hclge_get_strings, | |
7985 | .get_sset_count = hclge_get_sset_count, | |
7986 | .get_fw_version = hclge_get_fw_version, | |
7987 | .get_mdix_mode = hclge_get_mdix_mode, | |
d818396d | 7988 | .enable_vlan_filter = hclge_enable_vlan_filter, |
4e66632d | 7989 | .set_vlan_filter = hclge_set_vlan_filter, |
46a3df9f | 7990 | .set_vf_vlan_filter = hclge_set_vf_vlan_filter, |
5f9a7732 | 7991 | .enable_hw_strip_rxvtag = hclge_en_hw_strip_rxvtag, |
4ed340ab | 7992 | .reset_event = hclge_reset_event, |
2c883d73 | 7993 | .set_default_reset_request = hclge_set_def_reset_request, |
f1f779ce PL |
7994 | .get_tqps_and_rss_info = hclge_get_tqps_and_rss_info, |
7995 | .set_channels = hclge_set_channels, | |
4f645a90 | 7996 | .get_channels = hclge_get_channels, |
db2a3e43 FL |
7997 | .get_regs_len = hclge_get_regs_len, |
7998 | .get_regs = hclge_get_regs, | |
d9a0884e | 7999 | .set_led_id = hclge_set_led_id, |
d92ceae9 | 8000 | .get_link_mode = hclge_get_link_mode, |
3ca8e27c JS |
8001 | .add_fd_entry = hclge_add_fd_entry, |
8002 | .del_fd_entry = hclge_del_fd_entry, | |
7ce98982 | 8003 | .del_all_fd_entries = hclge_del_all_fd_entries, |
295043a7 JS |
8004 | .get_fd_rule_cnt = hclge_get_fd_rule_cnt, |
8005 | .get_fd_rule_info = hclge_get_fd_rule_info, | |
8006 | .get_fd_all_rules = hclge_get_all_rules, | |
7ce98982 | 8007 | .restore_fd_rules = hclge_restore_fd_entries, |
d1f04a80 | 8008 | .enable_fd = hclge_enable_fd, |
bf4fd28d | 8009 | .dbg_run_cmd = hclge_dbg_run_cmd, |
af72a21f | 8010 | .handle_hw_ras_error = hclge_handle_hw_ras_error, |
225c02eb HT |
8011 | .get_hw_reset_stat = hclge_get_hw_reset_stat, |
8012 | .ae_dev_resetting = hclge_ae_dev_resetting, | |
8013 | .ae_dev_reset_cnt = hclge_ae_dev_reset_cnt, | |
89d6386f | 8014 | .get_global_queue_id = hclge_covert_handle_qid_global, |
fad0e9d8 | 8015 | .set_timer_task = hclge_set_timer_task, |
46a3df9f S |
8016 | }; |
8017 | ||
8018 | static struct hnae3_ae_algo ae_algo = { | |
8019 | .ops = &hclge_ops, | |
46a3df9f S |
8020 | .pdev_id_table = ae_algo_pci_tbl, |
8021 | }; | |
8022 | ||
8023 | static int hclge_init(void) | |
8024 | { | |
8025 | pr_info("%s is initializing\n", HCLGE_NAME); | |
8026 | ||
a4d090cc FL |
8027 | hnae3_register_ae_algo(&ae_algo); |
8028 | ||
8029 | return 0; | |
46a3df9f S |
8030 | } |
8031 | ||
8032 | static void hclge_exit(void) | |
8033 | { | |
8034 | hnae3_unregister_ae_algo(&ae_algo); | |
8035 | } | |
8036 | module_init(hclge_init); | |
8037 | module_exit(hclge_exit); | |
8038 | ||
8039 | MODULE_LICENSE("GPL"); | |
8040 | MODULE_AUTHOR("Huawei Tech. Co., Ltd."); | |
8041 | MODULE_DESCRIPTION("HCLGE Driver"); | |
8042 | MODULE_VERSION(HCLGE_MOD_VERSION); |