// SPDX-License-Identifier: GPL-2.0+
// Copyright (c) 2016-2017 Hisilicon Limited.

#include <linux/acpi.h>
#include <linux/device.h>
#include <linux/etherdevice.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
#include <linux/if_vlan.h>
#include <net/rtnetlink.h>
#include "hclge_cmd.h"
#include "hclge_dcb.h"
#include "hclge_main.h"
#include "hclge_mbx.h"
#include "hclge_mdio.h"
#include "hclge_tm.h"
#include "hclge_err.h"
#include "hnae3.h"

#define HCLGE_NAME			"hclge"
#define HCLGE_STATS_READ(p, offset) (*((u64 *)((u8 *)(p) + (offset))))
#define HCLGE_MAC_STATS_FIELD_OFF(f) (offsetof(struct hclge_mac_stats, f))

static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mps);
static int hclge_init_vlan_config(struct hclge_dev *hdev);
static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev);
static int hclge_set_umv_space(struct hclge_dev *hdev, u16 space_size,
			       u16 *allocated_size, bool is_alloc);

static struct hnae3_ae_algo ae_algo;

static const struct pci_device_id ae_algo_pci_tbl[] = {
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_GE), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA_MACSEC), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA_MACSEC), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_MACSEC), 0},
	/* required last entry */
	{0, }
};

MODULE_DEVICE_TABLE(pci, ae_algo_pci_tbl);

static const u32 cmdq_reg_addr_list[] = {HCLGE_CMDQ_TX_ADDR_L_REG,
					 HCLGE_CMDQ_TX_ADDR_H_REG,
					 HCLGE_CMDQ_TX_DEPTH_REG,
					 HCLGE_CMDQ_TX_TAIL_REG,
					 HCLGE_CMDQ_TX_HEAD_REG,
					 HCLGE_CMDQ_RX_ADDR_L_REG,
					 HCLGE_CMDQ_RX_ADDR_H_REG,
					 HCLGE_CMDQ_RX_DEPTH_REG,
					 HCLGE_CMDQ_RX_TAIL_REG,
					 HCLGE_CMDQ_RX_HEAD_REG,
					 HCLGE_VECTOR0_CMDQ_SRC_REG,
					 HCLGE_CMDQ_INTR_STS_REG,
					 HCLGE_CMDQ_INTR_EN_REG,
					 HCLGE_CMDQ_INTR_GEN_REG};

static const u32 common_reg_addr_list[] = {HCLGE_MISC_VECTOR_REG_BASE,
					   HCLGE_VECTOR0_OTER_EN_REG,
					   HCLGE_MISC_RESET_STS_REG,
					   HCLGE_MISC_VECTOR_INT_STS,
					   HCLGE_GLOBAL_RESET_REG,
					   HCLGE_FUN_RST_ING,
					   HCLGE_GRO_EN_REG};

static const u32 ring_reg_addr_list[] = {HCLGE_RING_RX_ADDR_L_REG,
					 HCLGE_RING_RX_ADDR_H_REG,
					 HCLGE_RING_RX_BD_NUM_REG,
					 HCLGE_RING_RX_BD_LENGTH_REG,
					 HCLGE_RING_RX_MERGE_EN_REG,
					 HCLGE_RING_RX_TAIL_REG,
					 HCLGE_RING_RX_HEAD_REG,
					 HCLGE_RING_RX_FBD_NUM_REG,
					 HCLGE_RING_RX_OFFSET_REG,
					 HCLGE_RING_RX_FBD_OFFSET_REG,
					 HCLGE_RING_RX_STASH_REG,
					 HCLGE_RING_RX_BD_ERR_REG,
					 HCLGE_RING_TX_ADDR_L_REG,
					 HCLGE_RING_TX_ADDR_H_REG,
					 HCLGE_RING_TX_BD_NUM_REG,
					 HCLGE_RING_TX_PRIORITY_REG,
					 HCLGE_RING_TX_TC_REG,
					 HCLGE_RING_TX_MERGE_EN_REG,
					 HCLGE_RING_TX_TAIL_REG,
					 HCLGE_RING_TX_HEAD_REG,
					 HCLGE_RING_TX_FBD_NUM_REG,
					 HCLGE_RING_TX_OFFSET_REG,
					 HCLGE_RING_TX_EBD_NUM_REG,
					 HCLGE_RING_TX_EBD_OFFSET_REG,
					 HCLGE_RING_TX_BD_ERR_REG,
					 HCLGE_RING_EN_REG};

static const u32 tqp_intr_reg_addr_list[] = {HCLGE_TQP_INTR_CTRL_REG,
					     HCLGE_TQP_INTR_GL0_REG,
					     HCLGE_TQP_INTR_GL1_REG,
					     HCLGE_TQP_INTR_GL2_REG,
					     HCLGE_TQP_INTR_RL_REG};

static const char hns3_nic_test_strs[][ETH_GSTRING_LEN] = {
	"App Loopback test",
	"Serdes serial Loopback test",
	"Serdes parallel Loopback test",
	"Phy Loopback test"
};

static const struct hclge_comm_stats_str g_mac_stats_string[] = {
	{"mac_tx_mac_pause_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_mac_pause_num)},
	{"mac_rx_mac_pause_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_mac_pause_num)},
	{"mac_tx_pfc_pri0_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri0_pkt_num)},
	{"mac_tx_pfc_pri1_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri1_pkt_num)},
	{"mac_tx_pfc_pri2_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri2_pkt_num)},
	{"mac_tx_pfc_pri3_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri3_pkt_num)},
	{"mac_tx_pfc_pri4_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri4_pkt_num)},
	{"mac_tx_pfc_pri5_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri5_pkt_num)},
	{"mac_tx_pfc_pri6_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri6_pkt_num)},
	{"mac_tx_pfc_pri7_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri7_pkt_num)},
	{"mac_rx_pfc_pri0_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri0_pkt_num)},
	{"mac_rx_pfc_pri1_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri1_pkt_num)},
	{"mac_rx_pfc_pri2_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri2_pkt_num)},
	{"mac_rx_pfc_pri3_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri3_pkt_num)},
	{"mac_rx_pfc_pri4_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri4_pkt_num)},
	{"mac_rx_pfc_pri5_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri5_pkt_num)},
	{"mac_rx_pfc_pri6_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri6_pkt_num)},
	{"mac_rx_pfc_pri7_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri7_pkt_num)},
	{"mac_tx_total_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_total_pkt_num)},
	{"mac_tx_total_oct_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_total_oct_num)},
	{"mac_tx_good_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_good_pkt_num)},
	{"mac_tx_bad_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_bad_pkt_num)},
	{"mac_tx_good_oct_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_good_oct_num)},
	{"mac_tx_bad_oct_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_bad_oct_num)},
	{"mac_tx_uni_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_uni_pkt_num)},
	{"mac_tx_multi_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_multi_pkt_num)},
	{"mac_tx_broad_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_broad_pkt_num)},
	{"mac_tx_undersize_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_undersize_pkt_num)},
	{"mac_tx_oversize_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_oversize_pkt_num)},
	{"mac_tx_64_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_64_oct_pkt_num)},
	{"mac_tx_65_127_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_65_127_oct_pkt_num)},
	{"mac_tx_128_255_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_128_255_oct_pkt_num)},
	{"mac_tx_256_511_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_256_511_oct_pkt_num)},
	{"mac_tx_512_1023_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_512_1023_oct_pkt_num)},
	{"mac_tx_1024_1518_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1024_1518_oct_pkt_num)},
	{"mac_tx_1519_2047_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_2047_oct_pkt_num)},
	{"mac_tx_2048_4095_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_2048_4095_oct_pkt_num)},
	{"mac_tx_4096_8191_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_4096_8191_oct_pkt_num)},
	{"mac_tx_8192_9216_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_8192_9216_oct_pkt_num)},
	{"mac_tx_9217_12287_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_9217_12287_oct_pkt_num)},
	{"mac_tx_12288_16383_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_12288_16383_oct_pkt_num)},
	{"mac_tx_1519_max_good_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_max_good_oct_pkt_num)},
	{"mac_tx_1519_max_bad_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_max_bad_oct_pkt_num)},
	{"mac_rx_total_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_total_pkt_num)},
	{"mac_rx_total_oct_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_total_oct_num)},
	{"mac_rx_good_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_good_pkt_num)},
	{"mac_rx_bad_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_bad_pkt_num)},
	{"mac_rx_good_oct_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_good_oct_num)},
	{"mac_rx_bad_oct_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_bad_oct_num)},
	{"mac_rx_uni_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_uni_pkt_num)},
	{"mac_rx_multi_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_multi_pkt_num)},
	{"mac_rx_broad_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_broad_pkt_num)},
	{"mac_rx_undersize_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_undersize_pkt_num)},
	{"mac_rx_oversize_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_oversize_pkt_num)},
	{"mac_rx_64_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_64_oct_pkt_num)},
	{"mac_rx_65_127_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_65_127_oct_pkt_num)},
	{"mac_rx_128_255_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_128_255_oct_pkt_num)},
	{"mac_rx_256_511_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_256_511_oct_pkt_num)},
	{"mac_rx_512_1023_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_512_1023_oct_pkt_num)},
	{"mac_rx_1024_1518_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1024_1518_oct_pkt_num)},
	{"mac_rx_1519_2047_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_2047_oct_pkt_num)},
	{"mac_rx_2048_4095_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_2048_4095_oct_pkt_num)},
	{"mac_rx_4096_8191_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_4096_8191_oct_pkt_num)},
	{"mac_rx_8192_9216_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_8192_9216_oct_pkt_num)},
	{"mac_rx_9217_12287_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_9217_12287_oct_pkt_num)},
	{"mac_rx_12288_16383_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_12288_16383_oct_pkt_num)},
	{"mac_rx_1519_max_good_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_max_good_oct_pkt_num)},
	{"mac_rx_1519_max_bad_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_max_bad_oct_pkt_num)},

	{"mac_tx_fragment_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_fragment_pkt_num)},
	{"mac_tx_undermin_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_undermin_pkt_num)},
	{"mac_tx_jabber_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_jabber_pkt_num)},
	{"mac_tx_err_all_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_err_all_pkt_num)},
	{"mac_tx_from_app_good_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_from_app_good_pkt_num)},
	{"mac_tx_from_app_bad_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_from_app_bad_pkt_num)},
	{"mac_rx_fragment_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_fragment_pkt_num)},
	{"mac_rx_undermin_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_undermin_pkt_num)},
	{"mac_rx_jabber_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_jabber_pkt_num)},
	{"mac_rx_fcs_err_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_fcs_err_pkt_num)},
	{"mac_rx_send_app_good_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_send_app_good_pkt_num)},
	{"mac_rx_send_app_bad_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_send_app_bad_pkt_num)}
};

static const struct hclge_mac_mgr_tbl_entry_cmd hclge_mgr_table[] = {
	{
		.flags = HCLGE_MAC_MGR_MASK_VLAN_B,
		.ethter_type = cpu_to_le16(HCLGE_MAC_ETHERTYPE_LLDP),
		.mac_addr_hi32 = cpu_to_le32(htonl(0x0180C200)),
		.mac_addr_lo16 = cpu_to_le16(htons(0x000E)),
		.i_port_bitmap = 0x1,
	},
};

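/* Read the complete MAC statistics block from firmware and accumulate the
 * returned 64-bit counters into hdev->hw_stats.mac_stats. The first
 * descriptor also carries the command header, so it holds two fewer data
 * words than the following descriptors.
 */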
static int hclge_mac_update_stats(struct hclge_dev *hdev)
{
#define HCLGE_MAC_CMD_NUM 21
#define HCLGE_RTN_DATA_NUM 4

	u64 *data = (u64 *)(&hdev->hw_stats.mac_stats);
	struct hclge_desc desc[HCLGE_MAC_CMD_NUM];
	__le64 *desc_data;
	int i, k, n;
	int ret;

	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_STATS_MAC, true);
	ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_MAC_CMD_NUM);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Get MAC pkt stats fail, status = %d.\n", ret);

		return ret;
	}

	for (i = 0; i < HCLGE_MAC_CMD_NUM; i++) {
		if (unlikely(i == 0)) {
			desc_data = (__le64 *)(&desc[i].data[0]);
			n = HCLGE_RTN_DATA_NUM - 2;
		} else {
			desc_data = (__le64 *)(&desc[i]);
			n = HCLGE_RTN_DATA_NUM;
		}
		for (k = 0; k < n; k++) {
			*data++ += le64_to_cpu(*desc_data);
			desc_data++;
		}
	}

	return 0;
}

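/* Query the RX/TX received-packet counter of every TQP owned by this handle
 * and accumulate the values into the per-queue software statistics.
 */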
static int hclge_tqps_update_stats(struct hnae3_handle *handle)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	struct hnae3_queue *queue;
	struct hclge_desc desc[1];
	struct hclge_tqp *tqp;
	int ret, i;

	for (i = 0; i < kinfo->num_tqps; i++) {
		queue = handle->kinfo.tqp[i];
		tqp = container_of(queue, struct hclge_tqp, q);
		/* command : HCLGE_OPC_QUERY_IGU_STAT */
		hclge_cmd_setup_basic_desc(&desc[0],
					   HCLGE_OPC_QUERY_RX_STATUS,
					   true);

		desc[0].data[0] = cpu_to_le32((tqp->index & 0x1ff));
		ret = hclge_cmd_send(&hdev->hw, desc, 1);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"Query tqp stat fail, status = %d,queue = %d\n",
				ret, i);
			return ret;
		}
		tqp->tqp_stats.rcb_rx_ring_pktnum_rcd +=
			le32_to_cpu(desc[0].data[1]);
	}

	for (i = 0; i < kinfo->num_tqps; i++) {
		queue = handle->kinfo.tqp[i];
		tqp = container_of(queue, struct hclge_tqp, q);
		/* command : HCLGE_OPC_QUERY_IGU_STAT */
		hclge_cmd_setup_basic_desc(&desc[0],
					   HCLGE_OPC_QUERY_TX_STATUS,
					   true);

		desc[0].data[0] = cpu_to_le32((tqp->index & 0x1ff));
		ret = hclge_cmd_send(&hdev->hw, desc, 1);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"Query tqp stat fail, status = %d,queue = %d\n",
				ret, i);
			return ret;
		}
		tqp->tqp_stats.rcb_tx_ring_pktnum_rcd +=
			le32_to_cpu(desc[0].data[1]);
	}

	return 0;
}

static u64 *hclge_tqps_get_stats(struct hnae3_handle *handle, u64 *data)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
	struct hclge_tqp *tqp;
	u64 *buff = data;
	int i;

	for (i = 0; i < kinfo->num_tqps; i++) {
		tqp = container_of(kinfo->tqp[i], struct hclge_tqp, q);
		*buff++ = tqp->tqp_stats.rcb_tx_ring_pktnum_rcd;
	}

	for (i = 0; i < kinfo->num_tqps; i++) {
		tqp = container_of(kinfo->tqp[i], struct hclge_tqp, q);
		*buff++ = tqp->tqp_stats.rcb_rx_ring_pktnum_rcd;
	}

	return buff;
}

static int hclge_tqps_get_sset_count(struct hnae3_handle *handle, int stringset)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;

	return kinfo->num_tqps * 2;
}

static u8 *hclge_tqps_get_strings(struct hnae3_handle *handle, u8 *data)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
	u8 *buff = data;
	int i = 0;

	for (i = 0; i < kinfo->num_tqps; i++) {
		struct hclge_tqp *tqp = container_of(handle->kinfo.tqp[i],
			struct hclge_tqp, q);
		snprintf(buff, ETH_GSTRING_LEN, "txq%d_pktnum_rcd",
			 tqp->index);
		buff = buff + ETH_GSTRING_LEN;
	}

	for (i = 0; i < kinfo->num_tqps; i++) {
		struct hclge_tqp *tqp = container_of(kinfo->tqp[i],
			struct hclge_tqp, q);
		snprintf(buff, ETH_GSTRING_LEN, "rxq%d_pktnum_rcd",
			 tqp->index);
		buff = buff + ETH_GSTRING_LEN;
	}

	return buff;
}

static u64 *hclge_comm_get_stats(void *comm_stats,
				 const struct hclge_comm_stats_str strs[],
				 int size, u64 *data)
{
	u64 *buf = data;
	u32 i;

	for (i = 0; i < size; i++)
		buf[i] = HCLGE_STATS_READ(comm_stats, strs[i].offset);

	return buf + size;
}

static u8 *hclge_comm_get_strings(u32 stringset,
				  const struct hclge_comm_stats_str strs[],
				  int size, u8 *data)
{
	char *buff = (char *)data;
	u32 i;

	if (stringset != ETH_SS_STATS)
		return buff;

	for (i = 0; i < size; i++) {
		snprintf(buff, ETH_GSTRING_LEN,
			 strs[i].desc);
		buff = buff + ETH_GSTRING_LEN;
	}

	return (u8 *)buff;
}

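/* Fold the accumulated MAC counters into the standard netdev statistics. */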
static void hclge_update_netstat(struct hclge_hw_stats *hw_stats,
				 struct net_device_stats *net_stats)
{
	net_stats->tx_dropped = 0;
	net_stats->rx_errors = hw_stats->mac_stats.mac_rx_oversize_pkt_num;
	net_stats->rx_errors += hw_stats->mac_stats.mac_rx_undersize_pkt_num;
	net_stats->rx_errors += hw_stats->mac_stats.mac_rx_fcs_err_pkt_num;

	net_stats->multicast = hw_stats->mac_stats.mac_tx_multi_pkt_num;
	net_stats->multicast += hw_stats->mac_stats.mac_rx_multi_pkt_num;

	net_stats->rx_crc_errors = hw_stats->mac_stats.mac_rx_fcs_err_pkt_num;
	net_stats->rx_length_errors =
		hw_stats->mac_stats.mac_rx_undersize_pkt_num;
	net_stats->rx_length_errors +=
		hw_stats->mac_stats.mac_rx_oversize_pkt_num;
	net_stats->rx_over_errors =
		hw_stats->mac_stats.mac_rx_oversize_pkt_num;
}

static void hclge_update_stats_for_all(struct hclge_dev *hdev)
{
	struct hnae3_handle *handle;
	int status;

	handle = &hdev->vport[0].nic;
	if (handle->client) {
		status = hclge_tqps_update_stats(handle);
		if (status) {
			dev_err(&hdev->pdev->dev,
				"Update TQPS stats fail, status = %d.\n",
				status);
		}
	}

	status = hclge_mac_update_stats(hdev);
	if (status)
		dev_err(&hdev->pdev->dev,
			"Update MAC stats fail, status = %d.\n", status);

	hclge_update_netstat(&hdev->hw_stats, &handle->kinfo.netdev->stats);
}

static void hclge_update_stats(struct hnae3_handle *handle,
			       struct net_device_stats *net_stats)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	struct hclge_hw_stats *hw_stats = &hdev->hw_stats;
	int status;

	if (test_and_set_bit(HCLGE_STATE_STATISTICS_UPDATING, &hdev->state))
		return;

	status = hclge_mac_update_stats(hdev);
	if (status)
		dev_err(&hdev->pdev->dev,
			"Update MAC stats fail, status = %d.\n",
			status);

	status = hclge_tqps_update_stats(handle);
	if (status)
		dev_err(&hdev->pdev->dev,
			"Update TQPS stats fail, status = %d.\n",
			status);

	hclge_update_netstat(hw_stats, net_stats);

	clear_bit(HCLGE_STATE_STATISTICS_UPDATING, &hdev->state);
}

static int hclge_get_sset_count(struct hnae3_handle *handle, int stringset)
{
#define HCLGE_LOOPBACK_TEST_FLAGS (HNAE3_SUPPORT_APP_LOOPBACK |\
		HNAE3_SUPPORT_PHY_LOOPBACK |\
		HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK |\
		HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK)

	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	int count = 0;

	/* Loopback test support rules:
	 * mac: supported only in GE mode
	 * serdes: supported in all MAC modes (GE/XGE/LGE/CGE)
	 * phy: supported only when a PHY device exists on the board
	 */
	if (stringset == ETH_SS_TEST) {
		/* clear loopback bit flags at first */
		handle->flags = (handle->flags & (~HCLGE_LOOPBACK_TEST_FLAGS));
		if (hdev->pdev->revision >= 0x21 ||
		    hdev->hw.mac.speed == HCLGE_MAC_SPEED_10M ||
		    hdev->hw.mac.speed == HCLGE_MAC_SPEED_100M ||
		    hdev->hw.mac.speed == HCLGE_MAC_SPEED_1G) {
			count += 1;
			handle->flags |= HNAE3_SUPPORT_APP_LOOPBACK;
		}

		count += 2;
		handle->flags |= HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK;
		handle->flags |= HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK;
	} else if (stringset == ETH_SS_STATS) {
		count = ARRAY_SIZE(g_mac_stats_string) +
			hclge_tqps_get_sset_count(handle, stringset);
	}

	return count;
}

static void hclge_get_strings(struct hnae3_handle *handle,
			      u32 stringset,
			      u8 *data)
{
	u8 *p = (char *)data;
	int size;

	if (stringset == ETH_SS_STATS) {
		size = ARRAY_SIZE(g_mac_stats_string);
		p = hclge_comm_get_strings(stringset,
					   g_mac_stats_string,
					   size,
					   p);
		p = hclge_tqps_get_strings(handle, p);
	} else if (stringset == ETH_SS_TEST) {
		if (handle->flags & HNAE3_SUPPORT_APP_LOOPBACK) {
			memcpy(p,
			       hns3_nic_test_strs[HNAE3_LOOP_APP],
			       ETH_GSTRING_LEN);
			p += ETH_GSTRING_LEN;
		}
		if (handle->flags & HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK) {
			memcpy(p,
			       hns3_nic_test_strs[HNAE3_LOOP_SERIAL_SERDES],
			       ETH_GSTRING_LEN);
			p += ETH_GSTRING_LEN;
		}
		if (handle->flags & HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK) {
			memcpy(p,
			       hns3_nic_test_strs[HNAE3_LOOP_PARALLEL_SERDES],
			       ETH_GSTRING_LEN);
			p += ETH_GSTRING_LEN;
		}
		if (handle->flags & HNAE3_SUPPORT_PHY_LOOPBACK) {
			memcpy(p,
			       hns3_nic_test_strs[HNAE3_LOOP_PHY],
			       ETH_GSTRING_LEN);
			p += ETH_GSTRING_LEN;
		}
	}
}

static void hclge_get_stats(struct hnae3_handle *handle, u64 *data)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	u64 *p;

	p = hclge_comm_get_stats(&hdev->hw_stats.mac_stats,
				 g_mac_stats_string,
				 ARRAY_SIZE(g_mac_stats_string),
				 data);
	p = hclge_tqps_get_stats(handle, p);
}

static int hclge_parse_func_status(struct hclge_dev *hdev,
				   struct hclge_func_status_cmd *status)
{
	if (!(status->pf_state & HCLGE_PF_STATE_DONE))
		return -EINVAL;

	/* Set the pf to main pf */
	if (status->pf_state & HCLGE_PF_STATE_MAIN)
		hdev->flag |= HCLGE_FLAG_MAIN;
	else
		hdev->flag &= ~HCLGE_FLAG_MAIN;

	return 0;
}

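/* Query the PF function status from firmware, retrying a few times while
 * the PF reset has not completed yet.
 */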
static int hclge_query_function_status(struct hclge_dev *hdev)
{
	struct hclge_func_status_cmd *req;
	struct hclge_desc desc;
	int timeout = 0;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_FUNC_STATUS, true);
	req = (struct hclge_func_status_cmd *)desc.data;

	do {
		ret = hclge_cmd_send(&hdev->hw, &desc, 1);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"query function status failed %d.\n",
				ret);

			return ret;
		}

		/* Check pf reset is done */
		if (req->pf_state)
			break;
		usleep_range(1000, 2000);
	} while (timeout++ < 5);

	ret = hclge_parse_func_status(hdev, req);

	return ret;
}

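/* Query the PF resources assigned by firmware: number of TQPs, packet
 * buffer size and the MSI-X vector budget (including the RoCE vector
 * offset when RoCE is supported).
 */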
static int hclge_query_pf_resource(struct hclge_dev *hdev)
{
	struct hclge_pf_res_cmd *req;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_PF_RSRC, true);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"query pf resource failed %d.\n", ret);
		return ret;
	}

	req = (struct hclge_pf_res_cmd *)desc.data;
	hdev->num_tqps = __le16_to_cpu(req->tqp_num);
	hdev->pkt_buf_size = __le16_to_cpu(req->buf_size) << HCLGE_BUF_UNIT_S;

	if (hnae3_dev_roce_supported(hdev)) {
		hdev->roce_base_msix_offset =
		hnae3_get_field(__le16_to_cpu(req->msixcap_localid_ba_rocee),
				HCLGE_MSIX_OFT_ROCEE_M, HCLGE_MSIX_OFT_ROCEE_S);
		hdev->num_roce_msi =
		hnae3_get_field(__le16_to_cpu(req->pf_intr_vector_number),
				HCLGE_PF_VEC_NUM_M, HCLGE_PF_VEC_NUM_S);

		/* PF should have NIC vectors and Roce vectors,
		 * NIC vectors are queued before Roce vectors.
		 */
		hdev->num_msi = hdev->num_roce_msi +
				hdev->roce_base_msix_offset;
	} else {
		hdev->num_msi =
		hnae3_get_field(__le16_to_cpu(req->pf_intr_vector_number),
				HCLGE_PF_VEC_NUM_M, HCLGE_PF_VEC_NUM_S);
	}

	return 0;
}

static int hclge_parse_speed(int speed_cmd, int *speed)
{
	switch (speed_cmd) {
	case 6:
		*speed = HCLGE_MAC_SPEED_10M;
		break;
	case 7:
		*speed = HCLGE_MAC_SPEED_100M;
		break;
	case 0:
		*speed = HCLGE_MAC_SPEED_1G;
		break;
	case 1:
		*speed = HCLGE_MAC_SPEED_10G;
		break;
	case 2:
		*speed = HCLGE_MAC_SPEED_25G;
		break;
	case 3:
		*speed = HCLGE_MAC_SPEED_40G;
		break;
	case 4:
		*speed = HCLGE_MAC_SPEED_50G;
		break;
	case 5:
		*speed = HCLGE_MAC_SPEED_100G;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static void hclge_parse_fiber_link_mode(struct hclge_dev *hdev,
					u8 speed_ability)
{
	unsigned long *supported = hdev->hw.mac.supported;

	if (speed_ability & HCLGE_SUPPORT_1G_BIT)
		set_bit(ETHTOOL_LINK_MODE_1000baseX_Full_BIT,
			supported);

	if (speed_ability & HCLGE_SUPPORT_10G_BIT)
		set_bit(ETHTOOL_LINK_MODE_10000baseSR_Full_BIT,
			supported);

	if (speed_ability & HCLGE_SUPPORT_25G_BIT)
		set_bit(ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
			supported);

	if (speed_ability & HCLGE_SUPPORT_50G_BIT)
		set_bit(ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT,
			supported);

	if (speed_ability & HCLGE_SUPPORT_100G_BIT)
		set_bit(ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT,
			supported);

	set_bit(ETHTOOL_LINK_MODE_FIBRE_BIT, supported);
	set_bit(ETHTOOL_LINK_MODE_Pause_BIT, supported);
}

static void hclge_parse_link_mode(struct hclge_dev *hdev, u8 speed_ability)
{
	u8 media_type = hdev->hw.mac.media_type;

	if (media_type != HNAE3_MEDIA_TYPE_FIBER)
		return;

	hclge_parse_fiber_link_mode(hdev, speed_ability);
}

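/* Unpack the configuration words returned by HCLGE_OPC_GET_CFG_PARAM
 * into struct hclge_cfg.
 */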
static void hclge_parse_cfg(struct hclge_cfg *cfg, struct hclge_desc *desc)
{
	struct hclge_cfg_param_cmd *req;
	u64 mac_addr_tmp_high;
	u64 mac_addr_tmp;
	int i;

	req = (struct hclge_cfg_param_cmd *)desc[0].data;

	/* get the configuration */
	cfg->vmdq_vport_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
					      HCLGE_CFG_VMDQ_M,
					      HCLGE_CFG_VMDQ_S);
	cfg->tc_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
				      HCLGE_CFG_TC_NUM_M, HCLGE_CFG_TC_NUM_S);
	cfg->tqp_desc_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
					    HCLGE_CFG_TQP_DESC_N_M,
					    HCLGE_CFG_TQP_DESC_N_S);

	cfg->phy_addr = hnae3_get_field(__le32_to_cpu(req->param[1]),
					HCLGE_CFG_PHY_ADDR_M,
					HCLGE_CFG_PHY_ADDR_S);
	cfg->media_type = hnae3_get_field(__le32_to_cpu(req->param[1]),
					  HCLGE_CFG_MEDIA_TP_M,
					  HCLGE_CFG_MEDIA_TP_S);
	cfg->rx_buf_len = hnae3_get_field(__le32_to_cpu(req->param[1]),
					  HCLGE_CFG_RX_BUF_LEN_M,
					  HCLGE_CFG_RX_BUF_LEN_S);
	/* get mac_address */
	mac_addr_tmp = __le32_to_cpu(req->param[2]);
	mac_addr_tmp_high = hnae3_get_field(__le32_to_cpu(req->param[3]),
					    HCLGE_CFG_MAC_ADDR_H_M,
					    HCLGE_CFG_MAC_ADDR_H_S);

	mac_addr_tmp |= (mac_addr_tmp_high << 31) << 1;

	cfg->default_speed = hnae3_get_field(__le32_to_cpu(req->param[3]),
					     HCLGE_CFG_DEFAULT_SPEED_M,
					     HCLGE_CFG_DEFAULT_SPEED_S);
	cfg->rss_size_max = hnae3_get_field(__le32_to_cpu(req->param[3]),
					    HCLGE_CFG_RSS_SIZE_M,
					    HCLGE_CFG_RSS_SIZE_S);

	for (i = 0; i < ETH_ALEN; i++)
		cfg->mac_addr[i] = (mac_addr_tmp >> (8 * i)) & 0xff;

	req = (struct hclge_cfg_param_cmd *)desc[1].data;
	cfg->numa_node_map = __le32_to_cpu(req->param[0]);

	cfg->speed_ability = hnae3_get_field(__le32_to_cpu(req->param[1]),
					     HCLGE_CFG_SPEED_ABILITY_M,
					     HCLGE_CFG_SPEED_ABILITY_S);
	cfg->umv_space = hnae3_get_field(__le32_to_cpu(req->param[1]),
					 HCLGE_CFG_UMV_TBL_SPACE_M,
					 HCLGE_CFG_UMV_TBL_SPACE_S);
	if (!cfg->umv_space)
		cfg->umv_space = HCLGE_DEFAULT_UMV_SPACE_PER_PF;
}

/* hclge_get_cfg: query the static parameters from flash
 * @hdev: pointer to struct hclge_dev
 * @hcfg: the config structure to be filled in
 */
static int hclge_get_cfg(struct hclge_dev *hdev, struct hclge_cfg *hcfg)
{
	struct hclge_desc desc[HCLGE_PF_CFG_DESC_NUM];
	struct hclge_cfg_param_cmd *req;
	int i, ret;

	for (i = 0; i < HCLGE_PF_CFG_DESC_NUM; i++) {
		u32 offset = 0;

		req = (struct hclge_cfg_param_cmd *)desc[i].data;
		hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_GET_CFG_PARAM,
					   true);
		hnae3_set_field(offset, HCLGE_CFG_OFFSET_M,
				HCLGE_CFG_OFFSET_S, i * HCLGE_CFG_RD_LEN_BYTES);
		/* The read length is expressed in units of 4 bytes when sent
		 * to hardware.
		 */
		hnae3_set_field(offset, HCLGE_CFG_RD_LEN_M, HCLGE_CFG_RD_LEN_S,
				HCLGE_CFG_RD_LEN_BYTES / HCLGE_CFG_RD_LEN_UNIT);
		req->offset = cpu_to_le32(offset);
	}

	ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_PF_CFG_DESC_NUM);
	if (ret) {
		dev_err(&hdev->pdev->dev, "get config failed %d.\n", ret);
		return ret;
	}

	hclge_parse_cfg(hcfg, desc);

	return 0;
}

static int hclge_get_cap(struct hclge_dev *hdev)
{
	int ret;

	ret = hclge_query_function_status(hdev);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"query function status error %d.\n", ret);
		return ret;
	}

	/* get pf resource */
	ret = hclge_query_pf_resource(hdev);
	if (ret)
		dev_err(&hdev->pdev->dev, "query pf resource error %d.\n", ret);

	return ret;
}

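/* Read the static configuration and use it to initialise the queue, buffer,
 * MAC address and TC related fields of the hclge device.
 */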
static int hclge_configure(struct hclge_dev *hdev)
{
	struct hclge_cfg cfg;
	int ret, i;

	ret = hclge_get_cfg(hdev, &cfg);
	if (ret) {
		dev_err(&hdev->pdev->dev, "get mac mode error %d.\n", ret);
		return ret;
	}

	hdev->num_vmdq_vport = cfg.vmdq_vport_num;
	hdev->base_tqp_pid = 0;
	hdev->rss_size_max = cfg.rss_size_max;
	hdev->rx_buf_len = cfg.rx_buf_len;
	ether_addr_copy(hdev->hw.mac.mac_addr, cfg.mac_addr);
	hdev->hw.mac.media_type = cfg.media_type;
	hdev->hw.mac.phy_addr = cfg.phy_addr;
	hdev->num_desc = cfg.tqp_desc_num;
	hdev->tm_info.num_pg = 1;
	hdev->tc_max = cfg.tc_num;
	hdev->tm_info.hw_pfc_map = 0;
	hdev->wanted_umv_size = cfg.umv_space;

	ret = hclge_parse_speed(cfg.default_speed, &hdev->hw.mac.speed);
	if (ret) {
		dev_err(&hdev->pdev->dev, "Get wrong speed ret=%d.\n", ret);
		return ret;
	}

	hclge_parse_link_mode(hdev, cfg.speed_ability);

	if ((hdev->tc_max > HNAE3_MAX_TC) ||
	    (hdev->tc_max < 1)) {
		dev_warn(&hdev->pdev->dev, "TC num = %d.\n",
			 hdev->tc_max);
		hdev->tc_max = 1;
	}

	/* Dev does not support DCB */
	if (!hnae3_dev_dcb_supported(hdev)) {
		hdev->tc_max = 1;
		hdev->pfc_max = 0;
	} else {
		hdev->pfc_max = hdev->tc_max;
	}

	hdev->tm_info.num_tc = hdev->tc_max;

	/* Currently, non-contiguous TCs are not supported */
	for (i = 0; i < hdev->tm_info.num_tc; i++)
		hnae3_set_bit(hdev->hw_tc_map, i, 1);

	hdev->tx_sch_mode = HCLGE_FLAG_TC_BASE_SCH_MODE;

	return ret;
}

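/* Program the minimum and maximum TSO MSS the hardware may use. */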
static int hclge_config_tso(struct hclge_dev *hdev, int tso_mss_min,
			    int tso_mss_max)
{
	struct hclge_cfg_tso_status_cmd *req;
	struct hclge_desc desc;
	u16 tso_mss;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TSO_GENERIC_CONFIG, false);

	req = (struct hclge_cfg_tso_status_cmd *)desc.data;

	tso_mss = 0;
	hnae3_set_field(tso_mss, HCLGE_TSO_MSS_MIN_M,
			HCLGE_TSO_MSS_MIN_S, tso_mss_min);
	req->tso_mss_min = cpu_to_le16(tso_mss);

	tso_mss = 0;
	hnae3_set_field(tso_mss, HCLGE_TSO_MSS_MIN_M,
			HCLGE_TSO_MSS_MIN_S, tso_mss_max);
	req->tso_mss_max = cpu_to_le16(tso_mss);

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}

static int hclge_config_gro(struct hclge_dev *hdev, bool en)
{
	struct hclge_cfg_gro_status_cmd *req;
	struct hclge_desc desc;
	int ret;

	if (!hnae3_dev_gro_supported(hdev))
		return 0;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GRO_GENERIC_CONFIG, false);
	req = (struct hclge_cfg_gro_status_cmd *)desc.data;

	req->gro_en = cpu_to_le16(en ? 1 : 0);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"GRO hardware config cmd failed, ret = %d\n", ret);

	return ret;
}

static int hclge_alloc_tqps(struct hclge_dev *hdev)
{
	struct hclge_tqp *tqp;
	int i;

	hdev->htqp = devm_kcalloc(&hdev->pdev->dev, hdev->num_tqps,
				  sizeof(struct hclge_tqp), GFP_KERNEL);
	if (!hdev->htqp)
		return -ENOMEM;

	tqp = hdev->htqp;

	for (i = 0; i < hdev->num_tqps; i++) {
		tqp->dev = &hdev->pdev->dev;
		tqp->index = i;

		tqp->q.ae_algo = &ae_algo;
		tqp->q.buf_size = hdev->rx_buf_len;
		tqp->q.desc_num = hdev->num_desc;
		tqp->q.io_base = hdev->hw.io_base + HCLGE_TQP_REG_OFFSET +
			i * HCLGE_TQP_REG_SIZE;

		tqp++;
	}

	return 0;
}

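/* Bind a physical TQP to a function (PF or VF) and to the virtual queue id
 * it will be visible as inside that function.
 */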
static int hclge_map_tqps_to_func(struct hclge_dev *hdev, u16 func_id,
				  u16 tqp_pid, u16 tqp_vid, bool is_pf)
{
	struct hclge_tqp_map_cmd *req;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SET_TQP_MAP, false);

	req = (struct hclge_tqp_map_cmd *)desc.data;
	req->tqp_id = cpu_to_le16(tqp_pid);
	req->tqp_vf = func_id;
	req->tqp_flag = !is_pf << HCLGE_TQP_MAP_TYPE_B |
			1 << HCLGE_TQP_MAP_EN_B;
	req->tqp_vid = cpu_to_le16(tqp_vid);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev, "TQP map failed %d.\n", ret);

	return ret;
}

static int hclge_assign_tqp(struct hclge_vport *vport)
{
	struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
	struct hclge_dev *hdev = vport->back;
	int i, alloced;

	for (i = 0, alloced = 0; i < hdev->num_tqps &&
	     alloced < kinfo->num_tqps; i++) {
		if (!hdev->htqp[i].alloced) {
			hdev->htqp[i].q.handle = &vport->nic;
			hdev->htqp[i].q.tqp_index = alloced;
			hdev->htqp[i].q.desc_num = kinfo->num_desc;
			kinfo->tqp[alloced] = &hdev->htqp[i].q;
			hdev->htqp[i].alloced = true;
			alloced++;
		}
	}
	vport->alloc_tqps = kinfo->num_tqps;

	return 0;
}

static int hclge_knic_setup(struct hclge_vport *vport,
			    u16 num_tqps, u16 num_desc)
{
	struct hnae3_handle *nic = &vport->nic;
	struct hnae3_knic_private_info *kinfo = &nic->kinfo;
	struct hclge_dev *hdev = vport->back;
	int i, ret;

	kinfo->num_desc = num_desc;
	kinfo->rx_buf_len = hdev->rx_buf_len;
	kinfo->num_tc = min_t(u16, num_tqps, hdev->tm_info.num_tc);
	kinfo->rss_size
		= min_t(u16, hdev->rss_size_max, num_tqps / kinfo->num_tc);
	kinfo->num_tqps = kinfo->rss_size * kinfo->num_tc;

	for (i = 0; i < HNAE3_MAX_TC; i++) {
		if (hdev->hw_tc_map & BIT(i)) {
			kinfo->tc_info[i].enable = true;
			kinfo->tc_info[i].tqp_offset = i * kinfo->rss_size;
			kinfo->tc_info[i].tqp_count = kinfo->rss_size;
			kinfo->tc_info[i].tc = i;
		} else {
			/* Set to default queue if TC is disabled */
			kinfo->tc_info[i].enable = false;
			kinfo->tc_info[i].tqp_offset = 0;
			kinfo->tc_info[i].tqp_count = 1;
			kinfo->tc_info[i].tc = 0;
		}
	}

	kinfo->tqp = devm_kcalloc(&hdev->pdev->dev, kinfo->num_tqps,
				  sizeof(struct hnae3_queue *), GFP_KERNEL);
	if (!kinfo->tqp)
		return -ENOMEM;

	ret = hclge_assign_tqp(vport);
	if (ret)
		dev_err(&hdev->pdev->dev, "fail to assign TQPs %d.\n", ret);

	return ret;
}

static int hclge_map_tqp_to_vport(struct hclge_dev *hdev,
				  struct hclge_vport *vport)
{
	struct hnae3_handle *nic = &vport->nic;
	struct hnae3_knic_private_info *kinfo;
	u16 i;

	kinfo = &nic->kinfo;
	for (i = 0; i < kinfo->num_tqps; i++) {
		struct hclge_tqp *q =
			container_of(kinfo->tqp[i], struct hclge_tqp, q);
		bool is_pf;
		int ret;

		is_pf = !(vport->vport_id);
		ret = hclge_map_tqps_to_func(hdev, vport->vport_id, q->index,
					     i, is_pf);
		if (ret)
			return ret;
	}

	return 0;
}

static int hclge_map_tqp(struct hclge_dev *hdev)
{
	struct hclge_vport *vport = hdev->vport;
	u16 i, num_vport;

	num_vport = hdev->num_vmdq_vport + hdev->num_req_vfs + 1;
	for (i = 0; i < num_vport; i++) {
		int ret;

		ret = hclge_map_tqp_to_vport(hdev, vport);
		if (ret)
			return ret;

		vport++;
	}

	return 0;
}

static void hclge_unic_setup(struct hclge_vport *vport, u16 num_tqps)
{
	/* this would be initialized later */
}

static int hclge_vport_setup(struct hclge_vport *vport, u16 num_tqps)
{
	struct hnae3_handle *nic = &vport->nic;
	struct hclge_dev *hdev = vport->back;
	int ret;

	nic->pdev = hdev->pdev;
	nic->ae_algo = &ae_algo;
	nic->numa_node_mask = hdev->numa_node_mask;

	if (hdev->ae_dev->dev_type == HNAE3_DEV_KNIC) {
		ret = hclge_knic_setup(vport, num_tqps, hdev->num_desc);
		if (ret) {
			dev_err(&hdev->pdev->dev, "knic setup failed %d\n",
				ret);
			return ret;
		}
	} else {
		hclge_unic_setup(vport, num_tqps);
	}

	return 0;
}

static int hclge_alloc_vport(struct hclge_dev *hdev)
{
	struct pci_dev *pdev = hdev->pdev;
	struct hclge_vport *vport;
	u32 tqp_main_vport;
	u32 tqp_per_vport;
	int num_vport, i;
	int ret;

	/* We need to alloc a vport for main NIC of PF */
	num_vport = hdev->num_vmdq_vport + hdev->num_req_vfs + 1;

	if (hdev->num_tqps < num_vport) {
		dev_err(&hdev->pdev->dev, "tqps(%d) is less than vports(%d)",
			hdev->num_tqps, num_vport);
		return -EINVAL;
	}

	/* Alloc the same number of TQPs for every vport */
	tqp_per_vport = hdev->num_tqps / num_vport;
	tqp_main_vport = tqp_per_vport + hdev->num_tqps % num_vport;

	vport = devm_kcalloc(&pdev->dev, num_vport, sizeof(struct hclge_vport),
			     GFP_KERNEL);
	if (!vport)
		return -ENOMEM;

	hdev->vport = vport;
	hdev->num_alloc_vport = num_vport;

	if (IS_ENABLED(CONFIG_PCI_IOV))
		hdev->num_alloc_vfs = hdev->num_req_vfs;

	for (i = 0; i < num_vport; i++) {
		vport->back = hdev;
		vport->vport_id = i;
		vport->mps = HCLGE_MAC_DEFAULT_FRAME;

		if (i == 0)
			ret = hclge_vport_setup(vport, tqp_main_vport);
		else
			ret = hclge_vport_setup(vport, tqp_per_vport);
		if (ret) {
			dev_err(&pdev->dev,
				"vport setup failed for vport %d, %d\n",
				i, ret);
			return ret;
		}

		vport++;
	}

	return 0;
}

static int hclge_cmd_alloc_tx_buff(struct hclge_dev *hdev,
				   struct hclge_pkt_buf_alloc *buf_alloc)
{
/* TX buffer size is expressed in units of 128 bytes */
#define HCLGE_BUF_SIZE_UNIT_SHIFT	7
#define HCLGE_BUF_SIZE_UPDATE_EN_MSK	BIT(15)
	struct hclge_tx_buff_alloc_cmd *req;
	struct hclge_desc desc;
	int ret;
	u8 i;

	req = (struct hclge_tx_buff_alloc_cmd *)desc.data;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TX_BUFF_ALLOC, 0);
	for (i = 0; i < HCLGE_TC_NUM; i++) {
		u32 buf_size = buf_alloc->priv_buf[i].tx_buf_size;

		req->tx_pkt_buff[i] =
			cpu_to_le16((buf_size >> HCLGE_BUF_SIZE_UNIT_SHIFT) |
				    HCLGE_BUF_SIZE_UPDATE_EN_MSK);
	}

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev, "tx buffer alloc cmd failed %d.\n",
			ret);

	return ret;
}

static int hclge_tx_buffer_alloc(struct hclge_dev *hdev,
				 struct hclge_pkt_buf_alloc *buf_alloc)
{
	int ret = hclge_cmd_alloc_tx_buff(hdev, buf_alloc);

	if (ret)
		dev_err(&hdev->pdev->dev, "tx buffer alloc failed %d\n", ret);

	return ret;
}

static int hclge_get_tc_num(struct hclge_dev *hdev)
{
	int i, cnt = 0;

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
		if (hdev->hw_tc_map & BIT(i))
			cnt++;
	return cnt;
}

static int hclge_get_pfc_enalbe_num(struct hclge_dev *hdev)
{
	int i, cnt = 0;

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
		if (hdev->hw_tc_map & BIT(i) &&
		    hdev->tm_info.hw_pfc_map & BIT(i))
			cnt++;
	return cnt;
}

/* Get the number of pfc enabled TCs, which have private buffer */
static int hclge_get_pfc_priv_num(struct hclge_dev *hdev,
				  struct hclge_pkt_buf_alloc *buf_alloc)
{
	struct hclge_priv_buf *priv;
	int i, cnt = 0;

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		priv = &buf_alloc->priv_buf[i];
		if ((hdev->tm_info.hw_pfc_map & BIT(i)) &&
		    priv->enable)
			cnt++;
	}

	return cnt;
}

/* Get the number of pfc disabled TCs, which have private buffer */
static int hclge_get_no_pfc_priv_num(struct hclge_dev *hdev,
				     struct hclge_pkt_buf_alloc *buf_alloc)
{
	struct hclge_priv_buf *priv;
	int i, cnt = 0;

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		priv = &buf_alloc->priv_buf[i];
		if (hdev->hw_tc_map & BIT(i) &&
		    !(hdev->tm_info.hw_pfc_map & BIT(i)) &&
		    priv->enable)
			cnt++;
	}

	return cnt;
}

static u32 hclge_get_rx_priv_buff_alloced(struct hclge_pkt_buf_alloc *buf_alloc)
{
	struct hclge_priv_buf *priv;
	u32 rx_priv = 0;
	int i;

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		priv = &buf_alloc->priv_buf[i];
		if (priv->enable)
			rx_priv += priv->buf_size;
	}
	return rx_priv;
}

static u32 hclge_get_tx_buff_alloced(struct hclge_pkt_buf_alloc *buf_alloc)
{
	u32 i, total_tx_size = 0;

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
		total_tx_size += buf_alloc->priv_buf[i].tx_buf_size;

	return total_tx_size;
}

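/* Check whether the buffer left after private RX buffer allocation can
 * cover the shared buffer requirement; if so, record the shared buffer
 * size and the per-TC thresholds in buf_alloc.
 */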
static bool hclge_is_rx_buf_ok(struct hclge_dev *hdev,
			       struct hclge_pkt_buf_alloc *buf_alloc,
			       u32 rx_all)
{
	u32 shared_buf_min, shared_buf_tc, shared_std;
	int tc_num, pfc_enable_num;
	u32 shared_buf;
	u32 rx_priv;
	int i;

	tc_num = hclge_get_tc_num(hdev);
	pfc_enable_num = hclge_get_pfc_enalbe_num(hdev);

	if (hnae3_dev_dcb_supported(hdev))
		shared_buf_min = 2 * hdev->mps + HCLGE_DEFAULT_DV;
	else
		shared_buf_min = 2 * hdev->mps + HCLGE_DEFAULT_NON_DCB_DV;

	shared_buf_tc = pfc_enable_num * hdev->mps +
			(tc_num - pfc_enable_num) * hdev->mps / 2 +
			hdev->mps;
	shared_std = max_t(u32, shared_buf_min, shared_buf_tc);

	rx_priv = hclge_get_rx_priv_buff_alloced(buf_alloc);
	if (rx_all <= rx_priv + shared_std)
		return false;

	shared_buf = rx_all - rx_priv;
	buf_alloc->s_buf.buf_size = shared_buf;
	buf_alloc->s_buf.self.high = shared_buf;
	buf_alloc->s_buf.self.low = 2 * hdev->mps;

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		if ((hdev->hw_tc_map & BIT(i)) &&
		    (hdev->tm_info.hw_pfc_map & BIT(i))) {
			buf_alloc->s_buf.tc_thrd[i].low = hdev->mps;
			buf_alloc->s_buf.tc_thrd[i].high = 2 * hdev->mps;
		} else {
			buf_alloc->s_buf.tc_thrd[i].low = 0;
			buf_alloc->s_buf.tc_thrd[i].high = hdev->mps;
		}
	}

	return true;
}

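/* Reserve the default TX buffer for every enabled TC from the total
 * packet buffer.
 */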
static int hclge_tx_buffer_calc(struct hclge_dev *hdev,
				struct hclge_pkt_buf_alloc *buf_alloc)
{
	u32 i, total_size;

	total_size = hdev->pkt_buf_size;

	/* alloc tx buffer for all enabled tc */
	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];

		if (total_size < HCLGE_DEFAULT_TX_BUF)
			return -ENOMEM;

		if (hdev->hw_tc_map & BIT(i))
			priv->tx_buf_size = HCLGE_DEFAULT_TX_BUF;
		else
			priv->tx_buf_size = 0;

		total_size -= priv->tx_buf_size;
	}

	return 0;
}

46a3df9f S |
1436 | /* hclge_rx_buffer_calc: calculate the rx private buffer size for all TCs |
1437 | * @hdev: pointer to struct hclge_dev | |
acf61ecd | 1438 | * @buf_alloc: pointer to buffer calculation data |
46a3df9f S |
1439 | * @return: 0: calculate sucessful, negative: fail |
1440 | */ | |
1db9b1bf YL |
1441 | static int hclge_rx_buffer_calc(struct hclge_dev *hdev, |
1442 | struct hclge_pkt_buf_alloc *buf_alloc) | |
46a3df9f | 1443 | { |
d748274d YL |
1444 | #define HCLGE_BUF_SIZE_UNIT 128 |
1445 | u32 rx_all = hdev->pkt_buf_size, aligned_mps; | |
46a3df9f S |
1446 | int no_pfc_priv_num, pfc_priv_num; |
1447 | struct hclge_priv_buf *priv; | |
1448 | int i; | |
1449 | ||
d748274d | 1450 | aligned_mps = round_up(hdev->mps, HCLGE_BUF_SIZE_UNIT); |
acf61ecd | 1451 | rx_all -= hclge_get_tx_buff_alloced(buf_alloc); |
9ffe79a9 | 1452 | |
d602a525 YL |
1453 | /* When DCB is not supported, rx private |
1454 | * buffer is not allocated. | |
1455 | */ | |
1456 | if (!hnae3_dev_dcb_supported(hdev)) { | |
acf61ecd | 1457 | if (!hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all)) |
d602a525 YL |
1458 | return -ENOMEM; |
1459 | ||
1460 | return 0; | |
1461 | } | |
1462 | ||
46a3df9f S |
1463 | /* step 1, try to alloc private buffer for all enabled tc */ |
1464 | for (i = 0; i < HCLGE_MAX_TC_NUM; i++) { | |
acf61ecd | 1465 | priv = &buf_alloc->priv_buf[i]; |
46a3df9f S |
1466 | if (hdev->hw_tc_map & BIT(i)) { |
1467 | priv->enable = 1; | |
1468 | if (hdev->tm_info.hw_pfc_map & BIT(i)) { | |
d748274d YL |
1469 | priv->wl.low = aligned_mps; |
1470 | priv->wl.high = priv->wl.low + aligned_mps; | |
46a3df9f S |
1471 | priv->buf_size = priv->wl.high + |
1472 | HCLGE_DEFAULT_DV; | |
1473 | } else { | |
1474 | priv->wl.low = 0; | |
d748274d | 1475 | priv->wl.high = 2 * aligned_mps; |
46a3df9f S |
1476 | priv->buf_size = priv->wl.high; |
1477 | } | |
bb1fe9ea YL |
1478 | } else { |
1479 | priv->enable = 0; | |
1480 | priv->wl.low = 0; | |
1481 | priv->wl.high = 0; | |
1482 | priv->buf_size = 0; | |
46a3df9f S |
1483 | } |
1484 | } | |
1485 | ||
acf61ecd | 1486 | if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all)) |
46a3df9f S |
1487 | return 0; |
1488 | ||
1489 | /* step 2, try to decrease the buffer size of | |
1490 | * no pfc TC's private buffer | |
1491 | */ | |
1492 | for (i = 0; i < HCLGE_MAX_TC_NUM; i++) { | |
acf61ecd | 1493 | priv = &buf_alloc->priv_buf[i]; |
46a3df9f | 1494 | |
bb1fe9ea YL |
1495 | priv->enable = 0; |
1496 | priv->wl.low = 0; | |
1497 | priv->wl.high = 0; | |
1498 | priv->buf_size = 0; | |
1499 | ||
1500 | if (!(hdev->hw_tc_map & BIT(i))) | |
1501 | continue; | |
1502 | ||
1503 | priv->enable = 1; | |
46a3df9f S |
1504 | |
1505 | if (hdev->tm_info.hw_pfc_map & BIT(i)) { | |
1506 | priv->wl.low = 128; | |
d748274d | 1507 | priv->wl.high = priv->wl.low + aligned_mps; |
46a3df9f S |
1508 | priv->buf_size = priv->wl.high + HCLGE_DEFAULT_DV; |
1509 | } else { | |
1510 | priv->wl.low = 0; | |
d748274d | 1511 | priv->wl.high = aligned_mps; |
46a3df9f S |
1512 | priv->buf_size = priv->wl.high; |
1513 | } | |
1514 | } | |
1515 | ||
acf61ecd | 1516 | if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all)) |
46a3df9f S |
1517 | return 0; |
1518 | ||
1519 | /* step 3, try to reduce the number of pfc disabled TCs, | |
1520 | * which have private buffer | |
1521 | */ | |
1522 | /* get the number of pfc disabled TCs which still have a private buffer */ | |
acf61ecd | 1523 | no_pfc_priv_num = hclge_get_no_pfc_priv_num(hdev, buf_alloc); |
46a3df9f S |
1524 | |
1525 | /* let the last to be cleared first */ | |
1526 | for (i = HCLGE_MAX_TC_NUM - 1; i >= 0; i--) { | |
acf61ecd | 1527 | priv = &buf_alloc->priv_buf[i]; |
46a3df9f S |
1528 | |
1529 | if (hdev->hw_tc_map & BIT(i) && | |
1530 | !(hdev->tm_info.hw_pfc_map & BIT(i))) { | |
1531 | /* Clear the no pfc TC private buffer */ | |
1532 | priv->wl.low = 0; | |
1533 | priv->wl.high = 0; | |
1534 | priv->buf_size = 0; | |
1535 | priv->enable = 0; | |
1536 | no_pfc_priv_num--; | |
1537 | } | |
1538 | ||
acf61ecd | 1539 | if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all) || |
46a3df9f S |
1540 | no_pfc_priv_num == 0) |
1541 | break; | |
1542 | } | |
1543 | ||
acf61ecd | 1544 | if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all)) |
46a3df9f S |
1545 | return 0; |
1546 | ||
1547 | /* step 4, try to reduce the number of pfc enabled TCs | |
1548 | * which have private buffer. | |
1549 | */ | |
acf61ecd | 1550 | pfc_priv_num = hclge_get_pfc_priv_num(hdev, buf_alloc); |
46a3df9f S |
1551 | |
1552 | /* let the last to be cleared first */ | |
1553 | for (i = HCLGE_MAX_TC_NUM - 1; i >= 0; i--) { | |
acf61ecd | 1554 | priv = &buf_alloc->priv_buf[i]; |
46a3df9f S |
1555 | |
1556 | if (hdev->hw_tc_map & BIT(i) && | |
1557 | hdev->tm_info.hw_pfc_map & BIT(i)) { | |
1558 | /* Reduce the number of pfc TC with private buffer */ | |
1559 | priv->wl.low = 0; | |
1560 | priv->enable = 0; | |
1561 | priv->wl.high = 0; | |
1562 | priv->buf_size = 0; | |
1563 | pfc_priv_num--; | |
1564 | } | |
1565 | ||
acf61ecd | 1566 | if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all) || |
46a3df9f S |
1567 | pfc_priv_num == 0) |
1568 | break; | |
1569 | } | |
acf61ecd | 1570 | if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all)) |
46a3df9f S |
1571 | return 0; |
1572 | ||
1573 | return -ENOMEM; | |
1574 | } | |
1575 | ||
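/* hclge_rx_priv_buf_alloc: write the calculated per-TC private buffer sizes
 * and the shared buffer size to the hardware with the
 * HCLGE_OPC_RX_PRIV_BUFF_ALLOC command
 */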
acf61ecd YL |
1576 | static int hclge_rx_priv_buf_alloc(struct hclge_dev *hdev, |
1577 | struct hclge_pkt_buf_alloc *buf_alloc) | |
46a3df9f | 1578 | { |
d44f9b63 | 1579 | struct hclge_rx_priv_buff_cmd *req; |
46a3df9f S |
1580 | struct hclge_desc desc; |
1581 | int ret; | |
1582 | int i; | |
1583 | ||
1584 | hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_PRIV_BUFF_ALLOC, false); | |
d44f9b63 | 1585 | req = (struct hclge_rx_priv_buff_cmd *)desc.data; |
46a3df9f S |
1586 | |
1587 | /* Alloc private buffer TCs */ | |
1588 | for (i = 0; i < HCLGE_MAX_TC_NUM; i++) { | |
acf61ecd | 1589 | struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i]; |
46a3df9f S |
1590 | |
1591 | req->buf_num[i] = | |
1592 | cpu_to_le16(priv->buf_size >> HCLGE_BUF_UNIT_S); | |
1593 | req->buf_num[i] |= | |
5bca3b94 | 1594 | cpu_to_le16(1 << HCLGE_TC0_PRI_BUF_EN_B); |
46a3df9f S |
1595 | } |
1596 | ||
b8c8bf47 | 1597 | req->shared_buf = |
acf61ecd | 1598 | cpu_to_le16((buf_alloc->s_buf.buf_size >> HCLGE_BUF_UNIT_S) | |
b8c8bf47 YL |
1599 | (1 << HCLGE_TC0_PRI_BUF_EN_B)); |
1600 | ||
46a3df9f | 1601 | ret = hclge_cmd_send(&hdev->hw, &desc, 1); |
90415e85 | 1602 | if (ret) |
46a3df9f S |
1603 | dev_err(&hdev->pdev->dev, |
1604 | "rx private buffer alloc cmd failed %d\n", ret); | |
46a3df9f | 1605 | |
90415e85 | 1606 | return ret; |
46a3df9f S |
1607 | } |
1608 | ||
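/* hclge_rx_priv_wl_config: configure the high/low waterline of each TC's
 * private rx buffer; two chained descriptors carry all the TCs
 */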
acf61ecd YL |
1609 | static int hclge_rx_priv_wl_config(struct hclge_dev *hdev, |
1610 | struct hclge_pkt_buf_alloc *buf_alloc) | |
46a3df9f S |
1611 | { |
1612 | struct hclge_rx_priv_wl_buf *req; | |
1613 | struct hclge_priv_buf *priv; | |
1614 | struct hclge_desc desc[2]; | |
1615 | int i, j; | |
1616 | int ret; | |
1617 | ||
1618 | for (i = 0; i < 2; i++) { | |
1619 | hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_RX_PRIV_WL_ALLOC, | |
1620 | false); | |
1621 | req = (struct hclge_rx_priv_wl_buf *)desc[i].data; | |
1622 | ||
1623 | /* The first descriptor set the NEXT bit to 1 */ | |
1624 | if (i == 0) | |
1625 | desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT); | |
1626 | else | |
1627 | desc[i].flag &= ~cpu_to_le16(HCLGE_CMD_FLAG_NEXT); | |
1628 | ||
1629 | for (j = 0; j < HCLGE_TC_NUM_ONE_DESC; j++) { | |
acf61ecd YL |
1630 | u32 idx = i * HCLGE_TC_NUM_ONE_DESC + j; |
1631 | ||
1632 | priv = &buf_alloc->priv_buf[idx]; | |
46a3df9f S |
1633 | req->tc_wl[j].high = |
1634 | cpu_to_le16(priv->wl.high >> HCLGE_BUF_UNIT_S); | |
1635 | req->tc_wl[j].high |= | |
ee6b549b | 1636 | cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B)); |
46a3df9f S |
1637 | req->tc_wl[j].low = |
1638 | cpu_to_le16(priv->wl.low >> HCLGE_BUF_UNIT_S); | |
1639 | req->tc_wl[j].low |= | |
ee6b549b | 1640 | cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B)); |
46a3df9f S |
1641 | } |
1642 | } | |
1643 | ||
1644 | /* Send 2 descriptors at one time */ | |
1645 | ret = hclge_cmd_send(&hdev->hw, desc, 2); | |
90415e85 | 1646 | if (ret) |
46a3df9f S |
1647 | dev_err(&hdev->pdev->dev, |
1648 | "rx private waterline config cmd failed %d\n", | |
1649 | ret); | |
90415e85 | 1650 | return ret; |
46a3df9f S |
1651 | } |
1652 | ||
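/* hclge_common_thrd_config: configure each TC's high/low threshold within
 * the shared rx buffer; two chained descriptors carry all the TCs
 */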
acf61ecd YL |
1653 | static int hclge_common_thrd_config(struct hclge_dev *hdev, |
1654 | struct hclge_pkt_buf_alloc *buf_alloc) | |
46a3df9f | 1655 | { |
acf61ecd | 1656 | struct hclge_shared_buf *s_buf = &buf_alloc->s_buf; |
46a3df9f S |
1657 | struct hclge_rx_com_thrd *req; |
1658 | struct hclge_desc desc[2]; | |
1659 | struct hclge_tc_thrd *tc; | |
1660 | int i, j; | |
1661 | int ret; | |
1662 | ||
1663 | for (i = 0; i < 2; i++) { | |
1664 | hclge_cmd_setup_basic_desc(&desc[i], | |
1665 | HCLGE_OPC_RX_COM_THRD_ALLOC, false); | |
1666 | req = (struct hclge_rx_com_thrd *)&desc[i].data; | |
1667 | ||
1668 | /* The first descriptor set the NEXT bit to 1 */ | |
1669 | if (i == 0) | |
1670 | desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT); | |
1671 | else | |
1672 | desc[i].flag &= ~cpu_to_le16(HCLGE_CMD_FLAG_NEXT); | |
1673 | ||
1674 | for (j = 0; j < HCLGE_TC_NUM_ONE_DESC; j++) { | |
1675 | tc = &s_buf->tc_thrd[i * HCLGE_TC_NUM_ONE_DESC + j]; | |
1676 | ||
1677 | req->com_thrd[j].high = | |
1678 | cpu_to_le16(tc->high >> HCLGE_BUF_UNIT_S); | |
1679 | req->com_thrd[j].high |= | |
ee6b549b | 1680 | cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B)); |
46a3df9f S |
1681 | req->com_thrd[j].low = |
1682 | cpu_to_le16(tc->low >> HCLGE_BUF_UNIT_S); | |
1683 | req->com_thrd[j].low |= | |
ee6b549b | 1684 | cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B)); |
46a3df9f S |
1685 | } |
1686 | } | |
1687 | ||
1688 | /* Send 2 descriptors at one time */ | |
1689 | ret = hclge_cmd_send(&hdev->hw, desc, 2); | |
90415e85 | 1690 | if (ret) |
46a3df9f S |
1691 | dev_err(&hdev->pdev->dev, |
1692 | "common threshold config cmd failed %d\n", ret); | |
90415e85 | 1693 | return ret; |
46a3df9f S |
1694 | } |
1695 | ||
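/* hclge_common_wl_config: configure the high/low waterline of the shared
 * rx buffer itself
 */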
acf61ecd YL |
1696 | static int hclge_common_wl_config(struct hclge_dev *hdev, |
1697 | struct hclge_pkt_buf_alloc *buf_alloc) | |
46a3df9f | 1698 | { |
acf61ecd | 1699 | struct hclge_shared_buf *buf = &buf_alloc->s_buf; |
46a3df9f S |
1700 | struct hclge_rx_com_wl *req; |
1701 | struct hclge_desc desc; | |
1702 | int ret; | |
1703 | ||
1704 | hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_COM_WL_ALLOC, false); | |
1705 | ||
1706 | req = (struct hclge_rx_com_wl *)desc.data; | |
1707 | req->com_wl.high = cpu_to_le16(buf->self.high >> HCLGE_BUF_UNIT_S); | |
ee6b549b | 1708 | req->com_wl.high |= cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B)); |
46a3df9f S |
1709 | |
1710 | req->com_wl.low = cpu_to_le16(buf->self.low >> HCLGE_BUF_UNIT_S); | |
ee6b549b | 1711 | req->com_wl.low |= cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B)); |
46a3df9f S |
1712 | |
1713 | ret = hclge_cmd_send(&hdev->hw, &desc, 1); | |
90415e85 | 1714 | if (ret) |
46a3df9f S |
1715 | dev_err(&hdev->pdev->dev, |
1716 | "common waterline config cmd failed %d\n", ret); | |
930ff2f6 | 1717 | |
90415e85 | 1718 | return ret; |
46a3df9f S |
1719 | } |
1720 | ||
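/* hclge_buffer_alloc: calculate and configure the whole packet buffer:
 * tx buffers, rx private buffers and, when DCB is supported, the rx
 * private waterlines and shared thresholds, and finally the shared
 * buffer waterline
 */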
1721 | int hclge_buffer_alloc(struct hclge_dev *hdev) | |
1722 | { | |
acf61ecd | 1723 | struct hclge_pkt_buf_alloc *pkt_buf; |
46a3df9f S |
1724 | int ret; |
1725 | ||
acf61ecd YL |
1726 | pkt_buf = kzalloc(sizeof(*pkt_buf), GFP_KERNEL); |
1727 | if (!pkt_buf) | |
46a3df9f S |
1728 | return -ENOMEM; |
1729 | ||
acf61ecd | 1730 | ret = hclge_tx_buffer_calc(hdev, pkt_buf); |
9ffe79a9 YL |
1731 | if (ret) { |
1732 | dev_err(&hdev->pdev->dev, | |
1733 | "could not calc tx buffer size for all TCs %d\n", ret); | |
acf61ecd | 1734 | goto out; |
9ffe79a9 YL |
1735 | } |
1736 | ||
acf61ecd | 1737 | ret = hclge_tx_buffer_alloc(hdev, pkt_buf); |
46a3df9f S |
1738 | if (ret) { |
1739 | dev_err(&hdev->pdev->dev, | |
1740 | "could not alloc tx buffers %d\n", ret); | |
acf61ecd | 1741 | goto out; |
46a3df9f S |
1742 | } |
1743 | ||
acf61ecd | 1744 | ret = hclge_rx_buffer_calc(hdev, pkt_buf); |
46a3df9f S |
1745 | if (ret) { |
1746 | dev_err(&hdev->pdev->dev, | |
1747 | "could not calc rx priv buffer size for all TCs %d\n", | |
1748 | ret); | |
acf61ecd | 1749 | goto out; |
46a3df9f S |
1750 | } |
1751 | ||
acf61ecd | 1752 | ret = hclge_rx_priv_buf_alloc(hdev, pkt_buf); |
46a3df9f S |
1753 | if (ret) { |
1754 | dev_err(&hdev->pdev->dev, "could not alloc rx priv buffer %d\n", | |
1755 | ret); | |
acf61ecd | 1756 | goto out; |
46a3df9f S |
1757 | } |
1758 | ||
2daf4a65 | 1759 | if (hnae3_dev_dcb_supported(hdev)) { |
acf61ecd | 1760 | ret = hclge_rx_priv_wl_config(hdev, pkt_buf); |
2daf4a65 YL |
1761 | if (ret) { |
1762 | dev_err(&hdev->pdev->dev, | |
1763 | "could not configure rx private waterline %d\n", | |
1764 | ret); | |
acf61ecd | 1765 | goto out; |
2daf4a65 | 1766 | } |
46a3df9f | 1767 | |
acf61ecd | 1768 | ret = hclge_common_thrd_config(hdev, pkt_buf); |
2daf4a65 YL |
1769 | if (ret) { |
1770 | dev_err(&hdev->pdev->dev, | |
1771 | "could not configure common threshold %d\n", | |
1772 | ret); | |
acf61ecd | 1773 | goto out; |
2daf4a65 | 1774 | } |
46a3df9f S |
1775 | } |
1776 | ||
acf61ecd YL |
1777 | ret = hclge_common_wl_config(hdev, pkt_buf); |
1778 | if (ret) | |
46a3df9f S |
1779 | dev_err(&hdev->pdev->dev, |
1780 | "could not configure common waterline %d\n", ret); | |
46a3df9f | 1781 | |
acf61ecd YL |
1782 | out: |
1783 | kfree(pkt_buf); | |
1784 | return ret; | |
46a3df9f S |
1785 | } |
1786 | ||
1787 | static int hclge_init_roce_base_info(struct hclge_vport *vport) | |
1788 | { | |
1789 | struct hnae3_handle *roce = &vport->roce; | |
1790 | struct hnae3_handle *nic = &vport->nic; | |
1791 | ||
887c3820 | 1792 | roce->rinfo.num_vectors = vport->back->num_roce_msi; |
46a3df9f S |
1793 | |
1794 | if (vport->back->num_msi_left < vport->roce.rinfo.num_vectors || | |
1795 | vport->back->num_msi_left == 0) | |
1796 | return -EINVAL; | |
1797 | ||
1798 | roce->rinfo.base_vector = vport->back->roce_base_vector; | |
1799 | ||
1800 | roce->rinfo.netdev = nic->kinfo.netdev; | |
1801 | roce->rinfo.roce_io_base = vport->back->hw.io_base; | |
1802 | ||
1803 | roce->pdev = nic->pdev; | |
1804 | roce->ae_algo = nic->ae_algo; | |
1805 | roce->numa_node_mask = nic->numa_node_mask; | |
1806 | ||
1807 | return 0; | |
1808 | } | |
1809 | ||
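/* hclge_init_msi: allocate MSI/MSI-X vectors (up to hdev->num_msi) and set
 * up the vector_status/vector_irq bookkeeping arrays
 */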
887c3820 | 1810 | static int hclge_init_msi(struct hclge_dev *hdev) |
46a3df9f S |
1811 | { |
1812 | struct pci_dev *pdev = hdev->pdev; | |
887c3820 SM |
1813 | int vectors; |
1814 | int i; | |
46a3df9f | 1815 | |
887c3820 SM |
1816 | vectors = pci_alloc_irq_vectors(pdev, 1, hdev->num_msi, |
1817 | PCI_IRQ_MSI | PCI_IRQ_MSIX); | |
1818 | if (vectors < 0) { | |
1819 | dev_err(&pdev->dev, | |
1820 | "failed(%d) to allocate MSI/MSI-X vectors\n", | |
1821 | vectors); | |
1822 | return vectors; | |
46a3df9f | 1823 | } |
887c3820 SM |
1824 | if (vectors < hdev->num_msi) |
1825 | dev_warn(&hdev->pdev->dev, | |
1826 | "requested %d MSI/MSI-X, but allocated %d MSI/MSI-X\n", | |
1827 | hdev->num_msi, vectors); | |
46a3df9f | 1828 | |
887c3820 SM |
1829 | hdev->num_msi = vectors; |
1830 | hdev->num_msi_left = vectors; | |
1831 | hdev->base_msi_vector = pdev->irq; | |
46a3df9f | 1832 | hdev->roce_base_vector = hdev->base_msi_vector + |
5355e6d3 | 1833 | hdev->roce_base_msix_offset; |
46a3df9f | 1834 | |
46a3df9f S |
1835 | hdev->vector_status = devm_kcalloc(&pdev->dev, hdev->num_msi, |
1836 | sizeof(u16), GFP_KERNEL); | |
887c3820 SM |
1837 | if (!hdev->vector_status) { |
1838 | pci_free_irq_vectors(pdev); | |
46a3df9f | 1839 | return -ENOMEM; |
887c3820 | 1840 | } |
46a3df9f S |
1841 | |
1842 | for (i = 0; i < hdev->num_msi; i++) | |
1843 | hdev->vector_status[i] = HCLGE_INVALID_VPORT; | |
1844 | ||
887c3820 SM |
1845 | hdev->vector_irq = devm_kcalloc(&pdev->dev, hdev->num_msi, |
1846 | sizeof(int), GFP_KERNEL); | |
1847 | if (!hdev->vector_irq) { | |
1848 | pci_free_irq_vectors(pdev); | |
1849 | return -ENOMEM; | |
46a3df9f | 1850 | } |
46a3df9f S |
1851 | |
1852 | return 0; | |
1853 | } | |
1854 | ||
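/* hclge_check_speed_dup: force full duplex unless the speed is 10M or 100M,
 * the only speeds for which half duplex is kept here
 */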
1c780066 | 1855 | static u8 hclge_check_speed_dup(u8 duplex, int speed) |
46a3df9f | 1856 | { |
46a3df9f | 1857 | |
1c780066 YL |
1858 | if (!(speed == HCLGE_MAC_SPEED_10M || speed == HCLGE_MAC_SPEED_100M)) |
1859 | duplex = HCLGE_MAC_FULL; | |
46a3df9f | 1860 | |
1c780066 | 1861 | return duplex; |
46a3df9f S |
1862 | } |
1863 | ||
1c780066 YL |
1864 | static int hclge_cfg_mac_speed_dup_hw(struct hclge_dev *hdev, int speed, |
1865 | u8 duplex) | |
46a3df9f | 1866 | { |
d44f9b63 | 1867 | struct hclge_config_mac_speed_dup_cmd *req; |
46a3df9f S |
1868 | struct hclge_desc desc; |
1869 | int ret; | |
1870 | ||
d44f9b63 | 1871 | req = (struct hclge_config_mac_speed_dup_cmd *)desc.data; |
46a3df9f S |
1872 | |
1873 | hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_SPEED_DUP, false); | |
1874 | ||
ccc23ef3 | 1875 | hnae3_set_bit(req->speed_dup, HCLGE_CFG_DUPLEX_B, !!duplex); |
46a3df9f S |
1876 | |
1877 | switch (speed) { | |
1878 | case HCLGE_MAC_SPEED_10M: | |
ccc23ef3 PL |
1879 | hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M, |
1880 | HCLGE_CFG_SPEED_S, 6); | |
46a3df9f S |
1881 | break; |
1882 | case HCLGE_MAC_SPEED_100M: | |
ccc23ef3 PL |
1883 | hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M, |
1884 | HCLGE_CFG_SPEED_S, 7); | |
46a3df9f S |
1885 | break; |
1886 | case HCLGE_MAC_SPEED_1G: | |
ccc23ef3 PL |
1887 | hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M, |
1888 | HCLGE_CFG_SPEED_S, 0); | |
46a3df9f S |
1889 | break; |
1890 | case HCLGE_MAC_SPEED_10G: | |
ccc23ef3 PL |
1891 | hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M, |
1892 | HCLGE_CFG_SPEED_S, 1); | |
46a3df9f S |
1893 | break; |
1894 | case HCLGE_MAC_SPEED_25G: | |
ccc23ef3 PL |
1895 | hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M, |
1896 | HCLGE_CFG_SPEED_S, 2); | |
46a3df9f S |
1897 | break; |
1898 | case HCLGE_MAC_SPEED_40G: | |
ccc23ef3 PL |
1899 | hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M, |
1900 | HCLGE_CFG_SPEED_S, 3); | |
46a3df9f S |
1901 | break; |
1902 | case HCLGE_MAC_SPEED_50G: | |
ccc23ef3 PL |
1903 | hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M, |
1904 | HCLGE_CFG_SPEED_S, 4); | |
46a3df9f S |
1905 | break; |
1906 | case HCLGE_MAC_SPEED_100G: | |
ccc23ef3 PL |
1907 | hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M, |
1908 | HCLGE_CFG_SPEED_S, 5); | |
46a3df9f S |
1909 | break; |
1910 | default: | |
d7629e74 | 1911 | dev_err(&hdev->pdev->dev, "invalid speed (%d)\n", speed); |
46a3df9f S |
1912 | return -EINVAL; |
1913 | } | |
1914 | ||
ccc23ef3 PL |
1915 | hnae3_set_bit(req->mac_change_fec_en, HCLGE_CFG_MAC_SPEED_CHANGE_EN_B, |
1916 | 1); | |
46a3df9f S |
1917 | |
1918 | ret = hclge_cmd_send(&hdev->hw, &desc, 1); | |
1919 | if (ret) { | |
1920 | dev_err(&hdev->pdev->dev, | |
1921 | "mac speed/duplex config cmd failed %d.\n", ret); | |
1922 | return ret; | |
1923 | } | |
1924 | ||
1c780066 YL |
1925 | return 0; |
1926 | } | |
1927 | ||
1928 | int hclge_cfg_mac_speed_dup(struct hclge_dev *hdev, int speed, u8 duplex) | |
1929 | { | |
1930 | int ret; | |
1931 | ||
1932 | duplex = hclge_check_speed_dup(duplex, speed); | |
1933 | if (hdev->hw.mac.speed == speed && hdev->hw.mac.duplex == duplex) | |
1934 | return 0; | |
1935 | ||
1936 | ret = hclge_cfg_mac_speed_dup_hw(hdev, speed, duplex); | |
1937 | if (ret) | |
1938 | return ret; | |
1939 | ||
1940 | hdev->hw.mac.speed = speed; | |
1941 | hdev->hw.mac.duplex = duplex; | |
46a3df9f S |
1942 | |
1943 | return 0; | |
1944 | } | |
1945 | ||
1946 | static int hclge_cfg_mac_speed_dup_h(struct hnae3_handle *handle, int speed, | |
1947 | u8 duplex) | |
1948 | { | |
1949 | struct hclge_vport *vport = hclge_get_vport(handle); | |
1950 | struct hclge_dev *hdev = vport->back; | |
1951 | ||
1952 | return hclge_cfg_mac_speed_dup(hdev, speed, duplex); | |
1953 | } | |
1954 | ||
1955 | static int hclge_query_mac_an_speed_dup(struct hclge_dev *hdev, int *speed, | |
1956 | u8 *duplex) | |
1957 | { | |
d44f9b63 | 1958 | struct hclge_query_an_speed_dup_cmd *req; |
46a3df9f S |
1959 | struct hclge_desc desc; |
1960 | int speed_tmp; | |
1961 | int ret; | |
1962 | ||
d44f9b63 | 1963 | req = (struct hclge_query_an_speed_dup_cmd *)desc.data; |
46a3df9f S |
1964 | |
1965 | hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_AN_RESULT, true); | |
1966 | ret = hclge_cmd_send(&hdev->hw, &desc, 1); | |
1967 | if (ret) { | |
1968 | dev_err(&hdev->pdev->dev, | |
1969 | "mac speed/autoneg/duplex query cmd failed %d\n", | |
1970 | ret); | |
1971 | return ret; | |
1972 | } | |
1973 | ||
ccc23ef3 PL |
1974 | *duplex = hnae3_get_bit(req->an_syn_dup_speed, HCLGE_QUERY_DUPLEX_B); |
1975 | speed_tmp = hnae3_get_field(req->an_syn_dup_speed, HCLGE_QUERY_SPEED_M, | |
1976 | HCLGE_QUERY_SPEED_S); | |
46a3df9f S |
1977 | |
1978 | ret = hclge_parse_speed(speed_tmp, speed); | |
90415e85 | 1979 | if (ret) |
46a3df9f S |
1980 | dev_err(&hdev->pdev->dev, |
1981 | "could not parse speed(=%d), %d\n", speed_tmp, ret); | |
46a3df9f | 1982 | |
90415e85 | 1983 | return ret; |
46a3df9f S |
1984 | } |
1985 | ||
46a3df9f S |
1986 | static int hclge_set_autoneg_en(struct hclge_dev *hdev, bool enable) |
1987 | { | |
d44f9b63 | 1988 | struct hclge_config_auto_neg_cmd *req; |
46a3df9f | 1989 | struct hclge_desc desc; |
a90bb9a5 | 1990 | u32 flag = 0; |
46a3df9f S |
1991 | int ret; |
1992 | ||
1993 | hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_AN_MODE, false); | |
1994 | ||
d44f9b63 | 1995 | req = (struct hclge_config_auto_neg_cmd *)desc.data; |
ccc23ef3 | 1996 | hnae3_set_bit(flag, HCLGE_MAC_CFG_AN_EN_B, !!enable); |
a90bb9a5 | 1997 | req->cfg_an_cmd_flag = cpu_to_le32(flag); |
46a3df9f S |
1998 | |
1999 | ret = hclge_cmd_send(&hdev->hw, &desc, 1); | |
90415e85 | 2000 | if (ret) |
46a3df9f S |
2001 | dev_err(&hdev->pdev->dev, "auto neg set cmd failed %d.\n", |
2002 | ret); | |
46a3df9f | 2003 | |
90415e85 | 2004 | return ret; |
46a3df9f S |
2005 | } |
2006 | ||
2007 | static int hclge_set_autoneg(struct hnae3_handle *handle, bool enable) | |
2008 | { | |
2009 | struct hclge_vport *vport = hclge_get_vport(handle); | |
2010 | struct hclge_dev *hdev = vport->back; | |
2011 | ||
2012 | return hclge_set_autoneg_en(hdev, enable); | |
2013 | } | |
2014 | ||
2015 | static int hclge_get_autoneg(struct hnae3_handle *handle) | |
2016 | { | |
2017 | struct hclge_vport *vport = hclge_get_vport(handle); | |
2018 | struct hclge_dev *hdev = vport->back; | |
9ff804ee FL |
2019 | struct phy_device *phydev = hdev->hw.mac.phydev; |
2020 | ||
2021 | if (phydev) | |
2022 | return phydev->autoneg; | |
46a3df9f S |
2023 | |
2024 | return hdev->hw.mac.autoneg; | |
2025 | } | |
2026 | ||
2027 | static int hclge_mac_init(struct hclge_dev *hdev) | |
2028 | { | |
2029 | struct hclge_mac *mac = &hdev->hw.mac; | |
2030 | int ret; | |
2031 | ||
1c780066 YL |
2032 | hdev->hw.mac.duplex = HCLGE_MAC_FULL; |
2033 | ret = hclge_cfg_mac_speed_dup_hw(hdev, hdev->hw.mac.speed, | |
2034 | hdev->hw.mac.duplex); | |
46a3df9f S |
2035 | if (ret) { |
2036 | dev_err(&hdev->pdev->dev, | |
2037 | "Config mac speed dup fail ret=%d\n", ret); | |
2038 | return ret; | |
2039 | } | |
2040 | ||
2041 | mac->link = 0; | |
2042 | ||
4ee09281 YL |
2043 | ret = hclge_set_mac_mtu(hdev, hdev->mps); |
2044 | if (ret) { | |
2045 | dev_err(&hdev->pdev->dev, "set mtu failed ret=%d\n", ret); | |
2046 | return ret; | |
2047 | } | |
59bc85ec | 2048 | |
4ee09281 | 2049 | ret = hclge_buffer_alloc(hdev); |
90415e85 | 2050 | if (ret) |
59bc85ec | 2051 | dev_err(&hdev->pdev->dev, |
4ee09281 | 2052 | "allocate buffer fail, ret=%d\n", ret); |
59bc85ec | 2053 | |
90415e85 | 2054 | return ret; |
46a3df9f S |
2055 | } |
2056 | ||
22fd3468 SM |
2057 | static void hclge_mbx_task_schedule(struct hclge_dev *hdev) |
2058 | { | |
2059 | if (!test_and_set_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state)) | |
2060 | schedule_work(&hdev->mbx_service_task); | |
2061 | } | |
2062 | ||
ed4a1bb8 SM |
2063 | static void hclge_reset_task_schedule(struct hclge_dev *hdev) |
2064 | { | |
2065 | if (!test_and_set_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state)) | |
2066 | schedule_work(&hdev->rst_service_task); | |
2067 | } | |
2068 | ||
46a3df9f S |
2069 | static void hclge_task_schedule(struct hclge_dev *hdev) |
2070 | { | |
2071 | if (!test_bit(HCLGE_STATE_DOWN, &hdev->state) && | |
2072 | !test_bit(HCLGE_STATE_REMOVING, &hdev->state) && | |
2073 | !test_and_set_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state)) | |
2074 | (void)schedule_work(&hdev->service_task); | |
2075 | } | |
2076 | ||
2077 | static int hclge_get_mac_link_status(struct hclge_dev *hdev) | |
2078 | { | |
d44f9b63 | 2079 | struct hclge_link_status_cmd *req; |
46a3df9f S |
2080 | struct hclge_desc desc; |
2081 | int link_status; | |
2082 | int ret; | |
2083 | ||
2084 | hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_LINK_STATUS, true); | |
2085 | ret = hclge_cmd_send(&hdev->hw, &desc, 1); | |
2086 | if (ret) { | |
2087 | dev_err(&hdev->pdev->dev, "get link status cmd failed %d\n", | |
2088 | ret); | |
2089 | return ret; | |
2090 | } | |
2091 | ||
d44f9b63 | 2092 | req = (struct hclge_link_status_cmd *)desc.data; |
e23e21ea | 2093 | link_status = req->status & HCLGE_LINK_STATUS_UP_M; |
46a3df9f S |
2094 | |
2095 | return !!link_status; | |
2096 | } | |
2097 | ||
2098 | static int hclge_get_mac_phy_link(struct hclge_dev *hdev) | |
2099 | { | |
2100 | int mac_state; | |
2101 | int link_stat; | |
2102 | ||
ed6acb33 PL |
2103 | if (test_bit(HCLGE_STATE_DOWN, &hdev->state)) |
2104 | return 0; | |
2105 | ||
46a3df9f S |
2106 | mac_state = hclge_get_mac_link_status(hdev); |
2107 | ||
2108 | if (hdev->hw.mac.phydev) { | |
7ce8e698 | 2109 | if (hdev->hw.mac.phydev->state == PHY_RUNNING) |
46a3df9f S |
2110 | link_stat = mac_state & |
2111 | hdev->hw.mac.phydev->link; | |
2112 | else | |
2113 | link_stat = 0; | |
2114 | ||
2115 | } else { | |
2116 | link_stat = mac_state; | |
2117 | } | |
2118 | ||
2119 | return !!link_stat; | |
2120 | } | |
2121 | ||
2122 | static void hclge_update_link_status(struct hclge_dev *hdev) | |
2123 | { | |
2124 | struct hnae3_client *client = hdev->nic_client; | |
2125 | struct hnae3_handle *handle; | |
2126 | int state; | |
2127 | int i; | |
2128 | ||
2129 | if (!client) | |
2130 | return; | |
2131 | state = hclge_get_mac_phy_link(hdev); | |
2132 | if (state != hdev->hw.mac.link) { | |
2133 | for (i = 0; i < hdev->num_vmdq_vport + 1; i++) { | |
2134 | handle = &hdev->vport[i].nic; | |
2135 | client->ops->link_status_change(handle, state); | |
2136 | } | |
2137 | hdev->hw.mac.link = state; | |
2138 | } | |
2139 | } | |
2140 | ||
2141 | static int hclge_update_speed_duplex(struct hclge_dev *hdev) | |
2142 | { | |
2143 | struct hclge_mac mac = hdev->hw.mac; | |
2144 | u8 duplex; | |
2145 | int speed; | |
2146 | int ret; | |
2147 | ||
2148 | /* get the speed and duplex as the autoneg result from the mac cmd when | |
2149 | * the phy doesn't exist. | |
2150 | */ | |
c040366b | 2151 | if (mac.phydev || !mac.autoneg) |
46a3df9f S |
2152 | return 0; |
2153 | ||
2154 | ret = hclge_query_mac_an_speed_dup(hdev, &speed, &duplex); | |
2155 | if (ret) { | |
2156 | dev_err(&hdev->pdev->dev, | |
2157 | "mac autoneg/speed/duplex query failed %d\n", ret); | |
2158 | return ret; | |
2159 | } | |
2160 | ||
1c780066 YL |
2161 | ret = hclge_cfg_mac_speed_dup(hdev, speed, duplex); |
2162 | if (ret) { | |
2163 | dev_err(&hdev->pdev->dev, | |
2164 | "mac speed/duplex config failed %d\n", ret); | |
2165 | return ret; | |
46a3df9f S |
2166 | } |
2167 | ||
2168 | return 0; | |
2169 | } | |
2170 | ||
2171 | static int hclge_update_speed_duplex_h(struct hnae3_handle *handle) | |
2172 | { | |
2173 | struct hclge_vport *vport = hclge_get_vport(handle); | |
2174 | struct hclge_dev *hdev = vport->back; | |
2175 | ||
2176 | return hclge_update_speed_duplex(hdev); | |
2177 | } | |
2178 | ||
2179 | static int hclge_get_status(struct hnae3_handle *handle) | |
2180 | { | |
2181 | struct hclge_vport *vport = hclge_get_vport(handle); | |
2182 | struct hclge_dev *hdev = vport->back; | |
2183 | ||
2184 | hclge_update_link_status(hdev); | |
2185 | ||
2186 | return hdev->hw.mac.link; | |
2187 | } | |
2188 | ||
d039ef68 | 2189 | static void hclge_service_timer(struct timer_list *t) |
46a3df9f | 2190 | { |
d039ef68 | 2191 | struct hclge_dev *hdev = from_timer(hdev, t, service_timer); |
46a3df9f | 2192 | |
d039ef68 | 2193 | mod_timer(&hdev->service_timer, jiffies + HZ); |
7a5d2a39 | 2194 | hdev->hw_stats.stats_timer++; |
46a3df9f S |
2195 | hclge_task_schedule(hdev); |
2196 | } | |
2197 | ||
2198 | static void hclge_service_complete(struct hclge_dev *hdev) | |
2199 | { | |
2200 | WARN_ON(!test_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state)); | |
2201 | ||
2202 | /* Flush memory before next watchdog */ | |
2203 | smp_mb__before_atomic(); | |
2204 | clear_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state); | |
2205 | } | |
2206 | ||
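/* hclge_check_event_cause: read the reset, MSI-X error and mailbox source
 * registers and return which vector0 event should be handled first; reset
 * events take priority over mailbox events
 */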
202f2014 SM |
2207 | static u32 hclge_check_event_cause(struct hclge_dev *hdev, u32 *clearval) |
2208 | { | |
00029070 | 2209 | u32 rst_src_reg, cmdq_src_reg, msix_src_reg; |
202f2014 SM |
2210 | |
2211 | /* fetch the events from their corresponding regs */ | |
0bcc9ba1 | 2212 | rst_src_reg = hclge_read_dev(&hdev->hw, HCLGE_MISC_VECTOR_INT_STS); |
22fd3468 | 2213 | cmdq_src_reg = hclge_read_dev(&hdev->hw, HCLGE_VECTOR0_CMDQ_SRC_REG); |
00029070 SM |
2214 | msix_src_reg = hclge_read_dev(&hdev->hw, |
2215 | HCLGE_VECTOR0_PF_OTHER_INT_STS_REG); | |
22fd3468 SM |
2216 | |
2217 | /* Assumption: If by any chance reset and mailbox events are reported | |
2218 | * together then we will only process reset event in this go and will | |
2219 | * defer the processing of the mailbox events. Since we would not have | |
2220 | * cleared the RX CMDQ event this time, we would receive another | |
2221 | * interrupt from H/W just for the mailbox. | |
2222 | */ | |
202f2014 SM |
2223 | |
2224 | /* check for vector0 reset event sources */ | |
de2eae69 HT |
2225 | if (BIT(HCLGE_VECTOR0_IMPRESET_INT_B) & rst_src_reg) { |
2226 | dev_info(&hdev->pdev->dev, "IMP reset interrupt\n"); | |
2227 | set_bit(HNAE3_IMP_RESET, &hdev->reset_pending); | |
2228 | set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state); | |
2229 | *clearval = BIT(HCLGE_VECTOR0_IMPRESET_INT_B); | |
2230 | return HCLGE_VECTOR0_EVENT_RST; | |
2231 | } | |
2232 | ||
202f2014 | 2233 | if (BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B) & rst_src_reg) { |
1afdb53a | 2234 | dev_info(&hdev->pdev->dev, "global reset interrupt\n"); |
7edef4ce | 2235 | set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state); |
202f2014 SM |
2236 | set_bit(HNAE3_GLOBAL_RESET, &hdev->reset_pending); |
2237 | *clearval = BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B); | |
2238 | return HCLGE_VECTOR0_EVENT_RST; | |
2239 | } | |
2240 | ||
2241 | if (BIT(HCLGE_VECTOR0_CORERESET_INT_B) & rst_src_reg) { | |
1afdb53a | 2242 | dev_info(&hdev->pdev->dev, "core reset interrupt\n"); |
7edef4ce | 2243 | set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state); |
202f2014 SM |
2244 | set_bit(HNAE3_CORE_RESET, &hdev->reset_pending); |
2245 | *clearval = BIT(HCLGE_VECTOR0_CORERESET_INT_B); | |
2246 | return HCLGE_VECTOR0_EVENT_RST; | |
2247 | } | |
2248 | ||
00029070 SM |
2249 | /* check for vector0 msix event source */ |
2250 | if (msix_src_reg & HCLGE_VECTOR0_REG_MSIX_MASK) | |
2251 | return HCLGE_VECTOR0_EVENT_ERR; | |
2252 | ||
22fd3468 SM |
2253 | /* check for vector0 mailbox(=CMDQ RX) event source */ |
2254 | if (BIT(HCLGE_VECTOR0_RX_CMDQ_INT_B) & cmdq_src_reg) { | |
2255 | cmdq_src_reg &= ~BIT(HCLGE_VECTOR0_RX_CMDQ_INT_B); | |
2256 | *clearval = cmdq_src_reg; | |
2257 | return HCLGE_VECTOR0_EVENT_MBX; | |
2258 | } | |
202f2014 SM |
2259 | |
2260 | return HCLGE_VECTOR0_EVENT_OTHER; | |
2261 | } | |
2262 | ||
2263 | static void hclge_clear_event_cause(struct hclge_dev *hdev, u32 event_type, | |
2264 | u32 regclr) | |
2265 | { | |
22fd3468 SM |
2266 | switch (event_type) { |
2267 | case HCLGE_VECTOR0_EVENT_RST: | |
202f2014 | 2268 | hclge_write_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG, regclr); |
22fd3468 SM |
2269 | break; |
2270 | case HCLGE_VECTOR0_EVENT_MBX: | |
2271 | hclge_write_dev(&hdev->hw, HCLGE_VECTOR0_CMDQ_SRC_REG, regclr); | |
2272 | break; | |
085920ba JS |
2273 | default: |
2274 | break; | |
22fd3468 | 2275 | } |
202f2014 SM |
2276 | } |
2277 | ||
9ab4ad14 XW |
2278 | static void hclge_clear_all_event_cause(struct hclge_dev *hdev) |
2279 | { | |
2280 | hclge_clear_event_cause(hdev, HCLGE_VECTOR0_EVENT_RST, | |
2281 | BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B) | | |
2282 | BIT(HCLGE_VECTOR0_CORERESET_INT_B) | | |
2283 | BIT(HCLGE_VECTOR0_IMPRESET_INT_B)); | |
2284 | hclge_clear_event_cause(hdev, HCLGE_VECTOR0_EVENT_MBX, 0); | |
2285 | } | |
2286 | ||
466b0c00 L |
2287 | static void hclge_enable_vector(struct hclge_misc_vector *vector, bool enable) |
2288 | { | |
2289 | writel(enable ? 1 : 0, vector->addr); | |
2290 | } | |
2291 | ||
2292 | static irqreturn_t hclge_misc_irq_handle(int irq, void *data) | |
2293 | { | |
2294 | struct hclge_dev *hdev = data; | |
202f2014 SM |
2295 | u32 event_cause; |
2296 | u32 clearval; | |
466b0c00 L |
2297 | |
2298 | hclge_enable_vector(&hdev->misc_vector, false); | |
202f2014 SM |
2299 | event_cause = hclge_check_event_cause(hdev, &clearval); |
2300 | ||
22fd3468 | 2301 | /* vector 0 interrupt is shared with reset and mailbox source events.*/ |
202f2014 | 2302 | switch (event_cause) { |
00029070 SM |
2303 | case HCLGE_VECTOR0_EVENT_ERR: |
2304 | /* we do not know what type of reset is required now. This could | |
2305 | * only be decided after we fetch the type of errors which | |
2306 | * caused this event. Therefore, we will do below for now: | |
2307 | * 1. Assert HNAE3_UNKNOWN_RESET type of reset. This means we | |
2308 | * have a deferred type of reset to be used. | |
2309 | * 2. Schedule the reset service task. | |
2310 | * 3. When service task receives HNAE3_UNKNOWN_RESET type it | |
2311 | * will fetch the correct type of reset. This would be done | |
2312 | * by first decoding the types of errors. | |
2313 | */ | |
2314 | set_bit(HNAE3_UNKNOWN_RESET, &hdev->reset_request); | |
2315 | /* fall through */ | |
202f2014 | 2316 | case HCLGE_VECTOR0_EVENT_RST: |
ed4a1bb8 | 2317 | hclge_reset_task_schedule(hdev); |
202f2014 | 2318 | break; |
22fd3468 SM |
2319 | case HCLGE_VECTOR0_EVENT_MBX: |
2320 | /* If we are here then, | |
2321 | * 1. Either we are not handling any mbx task and we are not | |
2322 | * scheduled as well | |
2323 | * OR | |
2324 | * 2. We could be handling a mbx task but nothing more is | |
2325 | * scheduled. | |
2326 | * In both cases, we should schedule mbx task as there are more | |
2327 | * mbx messages reported by this interrupt. | |
2328 | */ | |
2329 | hclge_mbx_task_schedule(hdev); | |
40ee4b71 | 2330 | break; |
202f2014 | 2331 | default: |
40ee4b71 YL |
2332 | dev_warn(&hdev->pdev->dev, |
2333 | "received unknown or unhandled event of vector0\n"); | |
202f2014 SM |
2334 | break; |
2335 | } | |
2336 | ||
e9a50d09 | 2337 | /* clear the source of interrupt if it is not caused by reset */ |
c9fc48dc | 2338 | if (event_cause == HCLGE_VECTOR0_EVENT_MBX) { |
e9a50d09 YL |
2339 | hclge_clear_event_cause(hdev, event_cause, clearval); |
2340 | hclge_enable_vector(&hdev->misc_vector, true); | |
2341 | } | |
466b0c00 L |
2342 | |
2343 | return IRQ_HANDLED; | |
2344 | } | |
2345 | ||
2346 | static void hclge_free_vector(struct hclge_dev *hdev, int vector_id) | |
2347 | { | |
1dc5378f PL |
2348 | if (hdev->vector_status[vector_id] == HCLGE_INVALID_VPORT) { |
2349 | dev_warn(&hdev->pdev->dev, | |
2350 | "vector(vector_id %d) has been freed.\n", vector_id); | |
2351 | return; | |
2352 | } | |
2353 | ||
466b0c00 L |
2354 | hdev->vector_status[vector_id] = HCLGE_INVALID_VPORT; |
2355 | hdev->num_msi_left += 1; | |
2356 | hdev->num_msi_used -= 1; | |
2357 | } | |
2358 | ||
2359 | static void hclge_get_misc_vector(struct hclge_dev *hdev) | |
2360 | { | |
2361 | struct hclge_misc_vector *vector = &hdev->misc_vector; | |
2362 | ||
2363 | vector->vector_irq = pci_irq_vector(hdev->pdev, 0); | |
2364 | ||
2365 | vector->addr = hdev->hw.io_base + HCLGE_MISC_VECTOR_REG_BASE; | |
2366 | hdev->vector_status[0] = 0; | |
2367 | ||
2368 | hdev->num_msi_left -= 1; | |
2369 | hdev->num_msi_used += 1; | |
2370 | } | |
2371 | ||
2372 | static int hclge_misc_irq_init(struct hclge_dev *hdev) | |
2373 | { | |
2374 | int ret; | |
2375 | ||
2376 | hclge_get_misc_vector(hdev); | |
2377 | ||
202f2014 SM |
2378 | /* this would be explicitly freed in the end */ |
2379 | ret = request_irq(hdev->misc_vector.vector_irq, hclge_misc_irq_handle, | |
2380 | 0, "hclge_misc", hdev); | |
466b0c00 L |
2381 | if (ret) { |
2382 | hclge_free_vector(hdev, 0); | |
2383 | dev_err(&hdev->pdev->dev, "request misc irq(%d) fail\n", | |
2384 | hdev->misc_vector.vector_irq); | |
2385 | } | |
2386 | ||
2387 | return ret; | |
2388 | } | |
2389 | ||
202f2014 SM |
2390 | static void hclge_misc_irq_uninit(struct hclge_dev *hdev) |
2391 | { | |
2392 | free_irq(hdev->misc_vector.vector_irq, hdev); | |
2393 | hclge_free_vector(hdev, 0); | |
2394 | } | |
2395 | ||
4ed340ab L |
2396 | static int hclge_notify_client(struct hclge_dev *hdev, |
2397 | enum hnae3_reset_notify_type type) | |
2398 | { | |
2399 | struct hnae3_client *client = hdev->nic_client; | |
2400 | u16 i; | |
2401 | ||
2402 | if (!client->ops->reset_notify) | |
2403 | return -EOPNOTSUPP; | |
2404 | ||
2405 | for (i = 0; i < hdev->num_vmdq_vport + 1; i++) { | |
ad7c82fe | 2406 | struct hnae3_handle *handle = &hdev->vport[i].nic; |
2407 | int ret; | |
b38db544 | 2408 | |
4ed340ab | 2409 | ret = client->ops->reset_notify(handle, type); |
1afdb53a HT |
2410 | if (ret) { |
2411 | dev_err(&hdev->pdev->dev, | |
2412 | "notify nic client failed %d(%d)\n", type, ret); | |
4ed340ab | 2413 | return ret; |
1afdb53a | 2414 | } |
4ed340ab L |
2415 | } |
2416 | ||
6060dc84 | 2417 | return 0; |
4ed340ab L |
2418 | } |
2419 | ||
3db6b633 HT |
2420 | static int hclge_notify_roce_client(struct hclge_dev *hdev, |
2421 | enum hnae3_reset_notify_type type) | |
2422 | { | |
2423 | struct hnae3_client *client = hdev->roce_client; | |
2424 | int ret = 0; | |
2425 | u16 i; | |
2426 | ||
2427 | if (!client) | |
2428 | return 0; | |
2429 | ||
2430 | if (!client->ops->reset_notify) | |
2431 | return -EOPNOTSUPP; | |
2432 | ||
2433 | for (i = 0; i < hdev->num_vmdq_vport + 1; i++) { | |
2434 | struct hnae3_handle *handle = &hdev->vport[i].roce; | |
2435 | ||
2436 | ret = client->ops->reset_notify(handle, type); | |
2437 | if (ret) { | |
2438 | dev_err(&hdev->pdev->dev, | |
2439 | "notify roce client failed %d(%d)", | |
2440 | type, ret); | |
2441 | return ret; | |
2442 | } | |
2443 | } | |
2444 | ||
2445 | return ret; | |
2446 | } | |
2447 | ||
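/* hclge_reset_wait: poll the reset status register (or the FLR done bit)
 * until the requested reset completes or the wait count runs out
 */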
4ed340ab L |
2448 | static int hclge_reset_wait(struct hclge_dev *hdev) |
2449 | { | |
2450 | #define HCLGE_RESET_WATI_MS 100 | |
de2eae69 | 2451 | #define HCLGE_RESET_WAIT_CNT 200 |
4ed340ab L |
2452 | u32 val, reg, reg_bit; |
2453 | u32 cnt = 0; | |
2454 | ||
2455 | switch (hdev->reset_type) { | |
de2eae69 HT |
2456 | case HNAE3_IMP_RESET: |
2457 | reg = HCLGE_GLOBAL_RESET_REG; | |
2458 | reg_bit = HCLGE_IMP_RESET_BIT; | |
2459 | break; | |
4ed340ab L |
2460 | case HNAE3_GLOBAL_RESET: |
2461 | reg = HCLGE_GLOBAL_RESET_REG; | |
2462 | reg_bit = HCLGE_GLOBAL_RESET_BIT; | |
2463 | break; | |
2464 | case HNAE3_CORE_RESET: | |
2465 | reg = HCLGE_GLOBAL_RESET_REG; | |
2466 | reg_bit = HCLGE_CORE_RESET_BIT; | |
2467 | break; | |
2468 | case HNAE3_FUNC_RESET: | |
2469 | reg = HCLGE_FUN_RST_ING; | |
2470 | reg_bit = HCLGE_FUN_RST_ING_B; | |
2471 | break; | |
26977990 HT |
2472 | case HNAE3_FLR_RESET: |
2473 | break; | |
4ed340ab L |
2474 | default: |
2475 | dev_err(&hdev->pdev->dev, | |
2476 | "Wait for unsupported reset type: %d\n", | |
2477 | hdev->reset_type); | |
2478 | return -EINVAL; | |
2479 | } | |
2480 | ||
26977990 HT |
2481 | if (hdev->reset_type == HNAE3_FLR_RESET) { |
2482 | while (!test_bit(HNAE3_FLR_DONE, &hdev->flr_state) && | |
2483 | cnt++ < HCLGE_RESET_WAIT_CNT) | |
2484 | msleep(HCLGE_RESET_WATI_MS); | |
2485 | ||
2486 | if (!test_bit(HNAE3_FLR_DONE, &hdev->flr_state)) { | |
2487 | dev_err(&hdev->pdev->dev, | |
2488 | "flr wait timeout: %d\n", cnt); | |
2489 | return -EBUSY; | |
2490 | } | |
2491 | ||
2492 | return 0; | |
2493 | } | |
2494 | ||
4ed340ab | 2495 | val = hclge_read_dev(&hdev->hw, reg); |
ccc23ef3 | 2496 | while (hnae3_get_bit(val, reg_bit) && cnt < HCLGE_RESET_WAIT_CNT) { |
4ed340ab L |
2497 | msleep(HCLGE_RESET_WATI_MS); |
2498 | val = hclge_read_dev(&hdev->hw, reg); | |
2499 | cnt++; | |
2500 | } | |
2501 | ||
4ed340ab L |
2502 | if (cnt >= HCLGE_RESET_WAIT_CNT) { |
2503 | dev_warn(&hdev->pdev->dev, | |
2504 | "Wait for reset timeout: %d\n", hdev->reset_type); | |
2505 | return -EBUSY; | |
2506 | } | |
2507 | ||
2508 | return 0; | |
2509 | } | |
2510 | ||
7885e906 HT |
2511 | static int hclge_set_vf_rst(struct hclge_dev *hdev, int func_id, bool reset) |
2512 | { | |
2513 | struct hclge_vf_rst_cmd *req; | |
2514 | struct hclge_desc desc; | |
2515 | ||
2516 | req = (struct hclge_vf_rst_cmd *)desc.data; | |
2517 | hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GBL_RST_STATUS, false); | |
2518 | req->dest_vfid = func_id; | |
2519 | ||
2520 | if (reset) | |
2521 | req->vf_rst = 0x1; | |
2522 | ||
2523 | return hclge_cmd_send(&hdev->hw, &desc, 1); | |
2524 | } | |
2525 | ||
2526 | int hclge_set_all_vf_rst(struct hclge_dev *hdev, bool reset) | |
2527 | { | |
2528 | int i; | |
2529 | ||
2530 | for (i = hdev->num_vmdq_vport + 1; i < hdev->num_alloc_vport; i++) { | |
2531 | struct hclge_vport *vport = &hdev->vport[i]; | |
2532 | int ret; | |
2533 | ||
2534 | /* Send cmd to set/clear VF's FUNC_RST_ING */ | |
2535 | ret = hclge_set_vf_rst(hdev, vport->vport_id, reset); | |
2536 | if (ret) { | |
2537 | dev_err(&hdev->pdev->dev, | |
5f9c2a66 | 2538 | "set vf(%d) rst failed %d!\n", |
7885e906 HT |
2539 | vport->vport_id, ret); |
2540 | return ret; | |
2541 | } | |
2542 | ||
2543 | if (!reset) | |
2544 | continue; | |
2545 | ||
2546 | /* Inform VF to process the reset. | |
2547 | * hclge_inform_reset_assert_to_vf may fail if VF | |
2548 | * driver is not loaded. | |
2549 | */ | |
2550 | ret = hclge_inform_reset_assert_to_vf(vport); | |
2551 | if (ret) | |
2552 | dev_warn(&hdev->pdev->dev, | |
5f9c2a66 | 2553 | "inform reset to vf(%d) failed %d!\n", |
7885e906 HT |
2554 | vport->vport_id, ret); |
2555 | } | |
2556 | ||
2557 | return 0; | |
2558 | } | |
2559 | ||
13a86fae | 2560 | int hclge_func_reset_cmd(struct hclge_dev *hdev, int func_id) |
4ed340ab L |
2561 | { |
2562 | struct hclge_desc desc; | |
2563 | struct hclge_reset_cmd *req = (struct hclge_reset_cmd *)desc.data; | |
2564 | int ret; | |
2565 | ||
2566 | hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_RST_TRIGGER, false); | |
ccc23ef3 | 2567 | hnae3_set_bit(req->mac_func_reset, HCLGE_CFG_RESET_FUNC_B, 1); |
4ed340ab L |
2568 | req->fun_reset_vfid = func_id; |
2569 | ||
2570 | ret = hclge_cmd_send(&hdev->hw, &desc, 1); | |
2571 | if (ret) | |
2572 | dev_err(&hdev->pdev->dev, | |
2573 | "send function reset cmd fail, status =%d\n", ret); | |
2574 | ||
2575 | return ret; | |
2576 | } | |
2577 | ||
d5752031 | 2578 | static void hclge_do_reset(struct hclge_dev *hdev) |
4ed340ab L |
2579 | { |
2580 | struct pci_dev *pdev = hdev->pdev; | |
2581 | u32 val; | |
2582 | ||
d5752031 | 2583 | switch (hdev->reset_type) { |
4ed340ab L |
2584 | case HNAE3_GLOBAL_RESET: |
2585 | val = hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG); | |
ccc23ef3 | 2586 | hnae3_set_bit(val, HCLGE_GLOBAL_RESET_BIT, 1); |
4ed340ab L |
2587 | hclge_write_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG, val); |
2588 | dev_info(&pdev->dev, "Global Reset requested\n"); | |
2589 | break; | |
2590 | case HNAE3_CORE_RESET: | |
2591 | val = hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG); | |
ccc23ef3 | 2592 | hnae3_set_bit(val, HCLGE_CORE_RESET_BIT, 1); |
4ed340ab L |
2593 | hclge_write_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG, val); |
2594 | dev_info(&pdev->dev, "Core Reset requested\n"); | |
2595 | break; | |
2596 | case HNAE3_FUNC_RESET: | |
2597 | dev_info(&pdev->dev, "PF Reset requested\n"); | |
ed4a1bb8 SM |
2598 | /* schedule again to check later */ |
2599 | set_bit(HNAE3_FUNC_RESET, &hdev->reset_pending); | |
2600 | hclge_reset_task_schedule(hdev); | |
4ed340ab | 2601 | break; |
26977990 HT |
2602 | case HNAE3_FLR_RESET: |
2603 | dev_info(&pdev->dev, "FLR requested\n"); | |
2604 | /* schedule again to check later */ | |
2605 | set_bit(HNAE3_FLR_RESET, &hdev->reset_pending); | |
2606 | hclge_reset_task_schedule(hdev); | |
2607 | break; | |
4ed340ab L |
2608 | default: |
2609 | dev_warn(&pdev->dev, | |
d5752031 | 2610 | "Unsupported reset type: %d\n", hdev->reset_type); |
4ed340ab L |
2611 | break; |
2612 | } | |
2613 | } | |
2614 | ||
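/* hclge_get_reset_level: resolve any UNKNOWN reset request through the MSI-X
 * error handling path, then return the highest priority reset level pending
 * in @addr and clear the levels it supersedes
 */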
d5752031 SM |
2615 | static enum hnae3_reset_type hclge_get_reset_level(struct hclge_dev *hdev, |
2616 | unsigned long *addr) | |
2617 | { | |
2618 | enum hnae3_reset_type rst_level = HNAE3_NONE_RESET; | |
2619 | ||
00029070 SM |
2620 | /* first, resolve any unknown reset type to the known type(s) */ |
2621 | if (test_bit(HNAE3_UNKNOWN_RESET, addr)) { | |
2622 | /* we will intentionally ignore any errors from this function | |
2623 | * as we will end up in *some* reset request in any case | |
2624 | */ | |
2625 | hclge_handle_hw_msix_error(hdev, addr); | |
2626 | clear_bit(HNAE3_UNKNOWN_RESET, addr); | |
2627 | /* We deferred the clearing of the error event which caused | |
2628 | * the interrupt since it was not possible to do that in | |
2629 | * interrupt context (and this is the reason we introduced the | |
2630 | * new UNKNOWN reset type). Now that the errors have been | |
2631 | * handled and cleared in hardware, we can safely enable | |
2632 | * interrupts. This is an exception to the norm. | |
2633 | */ | |
2634 | hclge_enable_vector(&hdev->misc_vector, true); | |
2635 | } | |
2636 | ||
d5752031 | 2637 | /* return the highest priority reset level amongst all */ |
62aff578 HT |
2638 | if (test_bit(HNAE3_IMP_RESET, addr)) { |
2639 | rst_level = HNAE3_IMP_RESET; | |
2640 | clear_bit(HNAE3_IMP_RESET, addr); | |
2641 | clear_bit(HNAE3_GLOBAL_RESET, addr); | |
2642 | clear_bit(HNAE3_CORE_RESET, addr); | |
2643 | clear_bit(HNAE3_FUNC_RESET, addr); | |
2644 | } else if (test_bit(HNAE3_GLOBAL_RESET, addr)) { | |
d5752031 | 2645 | rst_level = HNAE3_GLOBAL_RESET; |
62aff578 HT |
2646 | clear_bit(HNAE3_GLOBAL_RESET, addr); |
2647 | clear_bit(HNAE3_CORE_RESET, addr); | |
2648 | clear_bit(HNAE3_FUNC_RESET, addr); | |
2649 | } else if (test_bit(HNAE3_CORE_RESET, addr)) { | |
d5752031 | 2650 | rst_level = HNAE3_CORE_RESET; |
62aff578 HT |
2651 | clear_bit(HNAE3_CORE_RESET, addr); |
2652 | clear_bit(HNAE3_FUNC_RESET, addr); | |
2653 | } else if (test_bit(HNAE3_FUNC_RESET, addr)) { | |
d5752031 | 2654 | rst_level = HNAE3_FUNC_RESET; |
62aff578 | 2655 | clear_bit(HNAE3_FUNC_RESET, addr); |
26977990 HT |
2656 | } else if (test_bit(HNAE3_FLR_RESET, addr)) { |
2657 | rst_level = HNAE3_FLR_RESET; | |
2658 | clear_bit(HNAE3_FLR_RESET, addr); | |
62aff578 | 2659 | } |
d5752031 SM |
2660 | |
2661 | return rst_level; | |
2662 | } | |
2663 | ||
e9a50d09 YL |
2664 | static void hclge_clear_reset_cause(struct hclge_dev *hdev) |
2665 | { | |
2666 | u32 clearval = 0; | |
2667 | ||
2668 | switch (hdev->reset_type) { | |
2669 | case HNAE3_IMP_RESET: | |
2670 | clearval = BIT(HCLGE_VECTOR0_IMPRESET_INT_B); | |
2671 | break; | |
2672 | case HNAE3_GLOBAL_RESET: | |
2673 | clearval = BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B); | |
2674 | break; | |
2675 | case HNAE3_CORE_RESET: | |
2676 | clearval = BIT(HCLGE_VECTOR0_CORERESET_INT_B); | |
2677 | break; | |
2678 | default: | |
e9a50d09 YL |
2679 | break; |
2680 | } | |
2681 | ||
2682 | if (!clearval) | |
2683 | return; | |
2684 | ||
2685 | hclge_write_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG, clearval); | |
2686 | hclge_enable_vector(&hdev->misc_vector, true); | |
2687 | } | |
2688 | ||
7885e906 HT |
2689 | static int hclge_reset_prepare_down(struct hclge_dev *hdev) |
2690 | { | |
2691 | int ret = 0; | |
2692 | ||
2693 | switch (hdev->reset_type) { | |
2694 | case HNAE3_FUNC_RESET: | |
26977990 HT |
2695 | /* fall through */ |
2696 | case HNAE3_FLR_RESET: | |
7885e906 HT |
2697 | ret = hclge_set_all_vf_rst(hdev, true); |
2698 | break; | |
2699 | default: | |
2700 | break; | |
2701 | } | |
2702 | ||
2703 | return ret; | |
2704 | } | |
2705 | ||
48ac80db HT |
2706 | static int hclge_reset_prepare_wait(struct hclge_dev *hdev) |
2707 | { | |
de2eae69 | 2708 | u32 reg_val; |
48ac80db HT |
2709 | int ret = 0; |
2710 | ||
2711 | switch (hdev->reset_type) { | |
2712 | case HNAE3_FUNC_RESET: | |
7885e906 HT |
2713 | /* There is no mechanism for PF to know if VF has stopped IO |
2714 | * for now, just wait 100 ms for VF to stop IO | |
2715 | */ | |
2716 | msleep(100); | |
48ac80db HT |
2717 | ret = hclge_func_reset_cmd(hdev, 0); |
2718 | if (ret) { | |
2719 | dev_err(&hdev->pdev->dev, | |
7707c27b | 2720 | "asserting function reset fail %d!\n", ret); |
48ac80db HT |
2721 | return ret; |
2722 | } | |
2723 | ||
2724 | /* After performing pf reset, it is not necessary to do the | |
2725 | * mailbox handling or send any command to firmware, because | |
2726 | * any mailbox handling or command to firmware is only valid | |
2727 | * after hclge_cmd_init is called. | |
2728 | */ | |
2729 | set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state); | |
2730 | break; | |
26977990 HT |
2731 | case HNAE3_FLR_RESET: |
2732 | /* There is no mechanism for PF to know if VF has stopped IO | |
2733 | * for now, just wait 100 ms for VF to stop IO | |
2734 | */ | |
2735 | msleep(100); | |
2736 | set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state); | |
2737 | set_bit(HNAE3_FLR_DOWN, &hdev->flr_state); | |
2738 | break; | |
de2eae69 HT |
2739 | case HNAE3_IMP_RESET: |
2740 | reg_val = hclge_read_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG); | |
2741 | hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG, | |
2742 | BIT(HCLGE_VECTOR0_IMP_RESET_INT_B) | reg_val); | |
2743 | break; | |
48ac80db HT |
2744 | default: |
2745 | break; | |
2746 | } | |
2747 | ||
2748 | dev_info(&hdev->pdev->dev, "prepare wait ok\n"); | |
2749 | ||
2750 | return ret; | |
2751 | } | |
2752 | ||
1afdb53a HT |
2753 | static bool hclge_reset_err_handle(struct hclge_dev *hdev, bool is_timeout) |
2754 | { | |
2755 | #define MAX_RESET_FAIL_CNT 5 | |
2756 | #define RESET_UPGRADE_DELAY_SEC 10 | |
2757 | ||
2758 | if (hdev->reset_pending) { | |
2759 | dev_info(&hdev->pdev->dev, "Reset pending %lu\n", | |
2760 | hdev->reset_pending); | |
2761 | return true; | |
2762 | } else if ((hdev->reset_type != HNAE3_IMP_RESET) && | |
2763 | (hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG) & | |
2764 | BIT(HCLGE_IMP_RESET_BIT))) { | |
2765 | dev_info(&hdev->pdev->dev, | |
2766 | "reset failed because IMP Reset is pending\n"); | |
2767 | hclge_clear_reset_cause(hdev); | |
2768 | return false; | |
2769 | } else if (hdev->reset_fail_cnt < MAX_RESET_FAIL_CNT) { | |
2770 | hdev->reset_fail_cnt++; | |
2771 | if (is_timeout) { | |
2772 | set_bit(hdev->reset_type, &hdev->reset_pending); | |
2773 | dev_info(&hdev->pdev->dev, | |
2774 | "re-schedule to wait for hw reset done\n"); | |
2775 | return true; | |
2776 | } | |
2777 | ||
2778 | dev_info(&hdev->pdev->dev, "Upgrade reset level\n"); | |
2779 | hclge_clear_reset_cause(hdev); | |
2780 | mod_timer(&hdev->reset_timer, | |
2781 | jiffies + RESET_UPGRADE_DELAY_SEC * HZ); | |
2782 | ||
2783 | return false; | |
2784 | } | |
2785 | ||
2786 | hclge_clear_reset_cause(hdev); | |
2787 | dev_err(&hdev->pdev->dev, "Reset fail!\n"); | |
2788 | return false; | |
2789 | } | |
2790 | ||
7885e906 HT |
2791 | static int hclge_reset_prepare_up(struct hclge_dev *hdev) |
2792 | { | |
2793 | int ret = 0; | |
2794 | ||
2795 | switch (hdev->reset_type) { | |
2796 | case HNAE3_FUNC_RESET: | |
26977990 HT |
2797 | /* fall through */ |
2798 | case HNAE3_FLR_RESET: | |
7885e906 HT |
2799 | ret = hclge_set_all_vf_rst(hdev, false); |
2800 | break; | |
2801 | default: | |
2802 | break; | |
2803 | } | |
2804 | ||
2805 | return ret; | |
2806 | } | |
2807 | ||
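/* hclge_reset: perform the actual reset: notify the clients and stop the
 * stack, wait for the hardware reset to complete, re-initialize the ae
 * device and bring the clients back up; on failure decide whether to
 * re-schedule via hclge_reset_err_handle()
 */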
d5752031 SM |
2808 | static void hclge_reset(struct hclge_dev *hdev) |
2809 | { | |
7ce98982 | 2810 | struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev); |
1afdb53a HT |
2811 | bool is_timeout = false; |
2812 | int ret; | |
1a45360a | 2813 | |
7ce98982 JS |
2814 | /* Initialize ae_dev reset status as well, in case enet layer wants to |
2815 | * know if device is undergoing reset | |
2816 | */ | |
2817 | ae_dev->reset_type = hdev->reset_type; | |
225c02eb | 2818 | hdev->reset_count++; |
1a2f7bf2 | 2819 | hdev->last_reset_time = jiffies; |
d5752031 | 2820 | /* perform reset of the stack & ae device for a client */ |
1afdb53a HT |
2821 | ret = hclge_notify_roce_client(hdev, HNAE3_DOWN_CLIENT); |
2822 | if (ret) | |
2823 | goto err_reset; | |
2824 | ||
7885e906 HT |
2825 | ret = hclge_reset_prepare_down(hdev); |
2826 | if (ret) | |
2827 | goto err_reset; | |
2828 | ||
47622dc9 | 2829 | rtnl_lock(); |
1afdb53a HT |
2830 | ret = hclge_notify_client(hdev, HNAE3_DOWN_CLIENT); |
2831 | if (ret) | |
2832 | goto err_reset_lock; | |
d5752031 | 2833 | |
1afdb53a | 2834 | rtnl_unlock(); |
48ac80db | 2835 | |
1afdb53a HT |
2836 | ret = hclge_reset_prepare_wait(hdev); |
2837 | if (ret) | |
2838 | goto err_reset; | |
e9a50d09 | 2839 | |
1afdb53a HT |
2840 | if (hclge_reset_wait(hdev)) { |
2841 | is_timeout = true; | |
2842 | goto err_reset; | |
d5752031 SM |
2843 | } |
2844 | ||
1afdb53a HT |
2845 | ret = hclge_notify_roce_client(hdev, HNAE3_UNINIT_CLIENT); |
2846 | if (ret) | |
2847 | goto err_reset; | |
2848 | ||
2849 | rtnl_lock(); | |
2850 | ret = hclge_notify_client(hdev, HNAE3_UNINIT_CLIENT); | |
2851 | if (ret) | |
2852 | goto err_reset_lock; | |
2853 | ||
2854 | ret = hclge_reset_ae_dev(hdev->ae_dev); | |
2855 | if (ret) | |
2856 | goto err_reset_lock; | |
2857 | ||
2858 | ret = hclge_notify_client(hdev, HNAE3_INIT_CLIENT); | |
2859 | if (ret) | |
2860 | goto err_reset_lock; | |
2861 | ||
2862 | hclge_clear_reset_cause(hdev); | |
2863 | ||
7885e906 HT |
2864 | ret = hclge_reset_prepare_up(hdev); |
2865 | if (ret) | |
2866 | goto err_reset_lock; | |
2867 | ||
1afdb53a HT |
2868 | ret = hclge_notify_client(hdev, HNAE3_UP_CLIENT); |
2869 | if (ret) | |
2870 | goto err_reset_lock; | |
2871 | ||
47622dc9 | 2872 | rtnl_unlock(); |
3db6b633 | 2873 | |
1afdb53a HT |
2874 | ret = hclge_notify_roce_client(hdev, HNAE3_INIT_CLIENT); |
2875 | if (ret) | |
2876 | goto err_reset; | |
2877 | ||
2878 | ret = hclge_notify_roce_client(hdev, HNAE3_UP_CLIENT); | |
2879 | if (ret) | |
2880 | goto err_reset; | |
2881 | ||
2882 | return; | |
2883 | ||
2884 | err_reset_lock: | |
2885 | rtnl_unlock(); | |
2886 | err_reset: | |
2887 | if (hclge_reset_err_handle(hdev, is_timeout)) | |
2888 | hclge_reset_task_schedule(hdev); | |
d5752031 SM |
2889 | } |
2890 | ||
538d8ba0 SJ |
2891 | static void hclge_reset_event(struct pci_dev *pdev, struct hnae3_handle *handle) |
2892 | { | |
2893 | struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev); | |
2894 | struct hclge_dev *hdev = ae_dev->priv; | |
2895 | ||
2896 | /* We might end up getting called broadly because of the 2 cases below: | |
2897 | * 1. Recoverable error was conveyed through APEI and only way to bring | |
2898 | * normalcy is to reset. | |
2899 | * 2. A new reset request from the stack due to timeout | |
2900 | * | |
2901 | * For the first case, the error event might not have an ae handle available. | |
2902 | * Check whether this is a new reset request and we are not here just because | |
4aef908d SM |
2903 | * last reset attempt did not succeed and watchdog hit us again. We will |
2904 | * know this if last reset request did not occur very recently (watchdog | |
2905 | * timer = 5*HZ, let us check after a sufficiently large time, say 4*5*HZ). | |
2906 | * In case of a new request we reset the "reset level" to PF reset. | |
1a45360a HT |
2907 | * And if it is a repeat reset request of the most recent one then we |
2908 | * want to make sure we throttle the reset request. Therefore, we will | |
2909 | * not allow it again before 3*HZ has elapsed. | |
4aef908d | 2910 | */ |
538d8ba0 SJ |
2911 | if (!handle) |
2912 | handle = &hdev->vport[0].nic; | |
2913 | ||
1a2f7bf2 | 2914 | if (time_before(jiffies, (hdev->last_reset_time + 3 * HZ))) |
1a45360a | 2915 | return; |
2c883d73 | 2916 | else if (hdev->default_reset_request) |
1a2f7bf2 | 2917 | hdev->reset_level = |
2c883d73 HT |
2918 | hclge_get_reset_level(hdev, |
2919 | &hdev->default_reset_request); | |
1a2f7bf2 HT |
2920 | else if (time_after(jiffies, (hdev->last_reset_time + 4 * 5 * HZ))) |
2921 | hdev->reset_level = HNAE3_FUNC_RESET; | |
4ed340ab | 2922 | |
4aef908d | 2923 | dev_info(&hdev->pdev->dev, "received reset event, reset type is %d", |
1a2f7bf2 | 2924 | hdev->reset_level); |
4aef908d SM |
2925 | |
2926 | /* request reset & schedule reset task */ | |
1a2f7bf2 | 2927 | set_bit(hdev->reset_level, &hdev->reset_request); |
4aef908d SM |
2928 | hclge_reset_task_schedule(hdev); |
2929 | ||
1a2f7bf2 HT |
2930 | if (hdev->reset_level < HNAE3_GLOBAL_RESET) |
2931 | hdev->reset_level++; | |
4ed340ab L |
2932 | } |
2933 | ||
2c883d73 HT |
2934 | static void hclge_set_def_reset_request(struct hnae3_ae_dev *ae_dev, |
2935 | enum hnae3_reset_type rst_type) | |
2936 | { | |
2937 | struct hclge_dev *hdev = ae_dev->priv; | |
2938 | ||
2939 | set_bit(rst_type, &hdev->default_reset_request); | |
2940 | } | |
2941 | ||
1afdb53a HT |
2942 | static void hclge_reset_timer(struct timer_list *t) |
2943 | { | |
2944 | struct hclge_dev *hdev = from_timer(hdev, t, reset_timer); | |
2945 | ||
2946 | dev_info(&hdev->pdev->dev, | |
2947 | "triggering global reset in reset timer\n"); | |
2948 | set_bit(HNAE3_GLOBAL_RESET, &hdev->default_reset_request); | |
2949 | hclge_reset_event(hdev->pdev, NULL); | |
2950 | } | |
2951 | ||
4ed340ab L |
2952 | static void hclge_reset_subtask(struct hclge_dev *hdev) |
2953 | { | |
d5752031 SM |
2954 | /* check if there is any ongoing reset in the hardware. This status can | |
2955 | * be checked from reset_pending. If there is, we need to wait for the | |
2956 | * hardware to complete the reset. | |
2957 | * a. If we are able to figure out in reasonable time that the hardware | |
2958 | * has fully reset, then we can proceed with the driver and client | |
2959 | * reset. | |
2960 | * b. else, we can come back later to check this status so re-sched | |
2961 | * now. | |
2962 | */ | |
1a2f7bf2 | 2963 | hdev->last_reset_time = jiffies; |
d5752031 SM |
2964 | hdev->reset_type = hclge_get_reset_level(hdev, &hdev->reset_pending); |
2965 | if (hdev->reset_type != HNAE3_NONE_RESET) | |
2966 | hclge_reset(hdev); | |
4ed340ab | 2967 | |
d5752031 SM |
2968 | /* check if we got any *new* reset requests to be honored */ |
2969 | hdev->reset_type = hclge_get_reset_level(hdev, &hdev->reset_request); | |
2970 | if (hdev->reset_type != HNAE3_NONE_RESET) | |
2971 | hclge_do_reset(hdev); | |
4ed340ab | 2972 | |
4ed340ab L |
2973 | hdev->reset_type = HNAE3_NONE_RESET; |
2974 | } | |
2975 | ||
ed4a1bb8 | 2976 | static void hclge_reset_service_task(struct work_struct *work) |
466b0c00 | 2977 | { |
ed4a1bb8 SM |
2978 | struct hclge_dev *hdev = |
2979 | container_of(work, struct hclge_dev, rst_service_task); | |
2980 | ||
2981 | if (test_and_set_bit(HCLGE_STATE_RST_HANDLING, &hdev->state)) | |
2982 | return; | |
2983 | ||
2984 | clear_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state); | |
2985 | ||
4ed340ab | 2986 | hclge_reset_subtask(hdev); |
ed4a1bb8 SM |
2987 | |
2988 | clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state); | |
466b0c00 L |
2989 | } |
2990 | ||
22fd3468 SM |
2991 | static void hclge_mailbox_service_task(struct work_struct *work) |
2992 | { | |
2993 | struct hclge_dev *hdev = | |
2994 | container_of(work, struct hclge_dev, mbx_service_task); | |
2995 | ||
2996 | if (test_and_set_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state)) | |
2997 | return; | |
2998 | ||
2999 | clear_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state); | |
3000 | ||
3001 | hclge_mbx_handler(hdev); | |
3002 | ||
3003 | clear_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state); | |
3004 | } | |
3005 | ||
337460de YL |
3006 | static void hclge_update_vport_alive(struct hclge_dev *hdev) |
3007 | { | |
3008 | int i; | |
3009 | ||
3010 | /* start from vport 1 because the PF (vport 0) is always alive */ | |
3011 | for (i = 1; i < hdev->num_alloc_vport; i++) { | |
3012 | struct hclge_vport *vport = &hdev->vport[i]; | |
3013 | ||
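	/* A vport whose last_active_jiffies has not been refreshed within 8 * HZ is no longer considered alive. */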
3014 | if (time_after(jiffies, vport->last_active_jiffies + 8 * HZ)) | |
3015 | clear_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state); | |
b2c04029 YL |
3016 | |
3017 | /* If the VF is not alive, reset its MPS to the default value */ | |
3018 | if (!test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state)) | |
3019 | vport->mps = HCLGE_MAC_DEFAULT_FRAME; | |
337460de YL |
3020 | } |
3021 | } | |
3022 | ||
46a3df9f S |
3023 | static void hclge_service_task(struct work_struct *work) |
3024 | { | |
3025 | struct hclge_dev *hdev = | |
3026 | container_of(work, struct hclge_dev, service_task); | |
3027 | ||
7a5d2a39 JS |
3028 | if (hdev->hw_stats.stats_timer >= HCLGE_STATS_TIMER_INTERVAL) { |
3029 | hclge_update_stats_for_all(hdev); | |
3030 | hdev->hw_stats.stats_timer = 0; | |
3031 | } | |
3032 | ||
46a3df9f S |
3033 | hclge_update_speed_duplex(hdev); |
3034 | hclge_update_link_status(hdev); | |
337460de | 3035 | hclge_update_vport_alive(hdev); |
46a3df9f S |
3036 | hclge_service_complete(hdev); |
3037 | } | |
3038 | ||
46a3df9f S |
3039 | struct hclge_vport *hclge_get_vport(struct hnae3_handle *handle) |
3040 | { | |
3041 | /* VF handle has no client */ | |
3042 | if (!handle->client) | |
3043 | return container_of(handle, struct hclge_vport, nic); | |
3044 | else if (handle->client->type == HNAE3_CLIENT_ROCE) | |
3045 | return container_of(handle, struct hclge_vport, roce); | |
3046 | else | |
3047 | return container_of(handle, struct hclge_vport, nic); | |
3048 | } | |
3049 | ||
3050 | static int hclge_get_vector(struct hnae3_handle *handle, u16 vector_num, | |
3051 | struct hnae3_vector_info *vector_info) | |
3052 | { | |
3053 | struct hclge_vport *vport = hclge_get_vport(handle); | |
3054 | struct hnae3_vector_info *vector = vector_info; | |
3055 | struct hclge_dev *hdev = vport->back; | |
3056 | int alloc = 0; | |
3057 | int i, j; | |
3058 | ||
3059 | vector_num = min(hdev->num_msi_left, vector_num); | |
3060 | ||
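	/* Scan the MSI entries from index 1 (entry 0 is never handed out to rings) and hand back one free vector per requested slot. */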
3061 | for (j = 0; j < vector_num; j++) { | |
3062 | for (i = 1; i < hdev->num_msi; i++) { | |
3063 | if (hdev->vector_status[i] == HCLGE_INVALID_VPORT) { | |
3064 | vector->vector = pci_irq_vector(hdev->pdev, i); | |
3065 | vector->io_addr = hdev->hw.io_base + | |
3066 | HCLGE_VECTOR_REG_BASE + | |
3067 | (i - 1) * HCLGE_VECTOR_REG_OFFSET + | |
3068 | vport->vport_id * | |
3069 | HCLGE_VECTOR_VF_OFFSET; | |
3070 | hdev->vector_status[i] = vport->vport_id; | |
887c3820 | 3071 | hdev->vector_irq[i] = vector->vector; |
46a3df9f S |
3072 | |
3073 | vector++; | |
3074 | alloc++; | |
3075 | ||
3076 | break; | |
3077 | } | |
3078 | } | |
3079 | } | |
3080 | hdev->num_msi_left -= alloc; | |
3081 | hdev->num_msi_used += alloc; | |
3082 | ||
3083 | return alloc; | |
3084 | } | |
3085 | ||
3086 | static int hclge_get_vector_index(struct hclge_dev *hdev, int vector) | |
3087 | { | |
3088 | int i; | |
3089 | ||
887c3820 SM |
3090 | for (i = 0; i < hdev->num_msi; i++) |
3091 | if (vector == hdev->vector_irq[i]) | |
3092 | return i; | |
3093 | ||
46a3df9f S |
3094 | return -EINVAL; |
3095 | } | |
3096 | ||
7412200c YL |
3097 | static int hclge_put_vector(struct hnae3_handle *handle, int vector) |
3098 | { | |
3099 | struct hclge_vport *vport = hclge_get_vport(handle); | |
3100 | struct hclge_dev *hdev = vport->back; | |
3101 | int vector_id; | |
3102 | ||
3103 | vector_id = hclge_get_vector_index(hdev, vector); | |
3104 | if (vector_id < 0) { | |
3105 | dev_err(&hdev->pdev->dev, | |
3106 | "Get vector index fail. vector_id =%d\n", vector_id); | |
3107 | return vector_id; | |
3108 | } | |
3109 | ||
3110 | hclge_free_vector(hdev, vector_id); | |
3111 | ||
3112 | return 0; | |
3113 | } | |
3114 | ||
46a3df9f S |
3115 | static u32 hclge_get_rss_key_size(struct hnae3_handle *handle) |
3116 | { | |
3117 | return HCLGE_RSS_KEY_SIZE; | |
3118 | } | |
3119 | ||
3120 | static u32 hclge_get_rss_indir_size(struct hnae3_handle *handle) | |
3121 | { | |
3122 | return HCLGE_RSS_IND_TBL_SIZE; | |
3123 | } | |
3124 | ||
46a3df9f S |
3125 | static int hclge_set_rss_algo_key(struct hclge_dev *hdev, |
3126 | const u8 hfunc, const u8 *key) | |
3127 | { | |
d44f9b63 | 3128 | struct hclge_rss_config_cmd *req; |
46a3df9f S |
3129 | struct hclge_desc desc; |
3130 | int key_offset; | |
3131 | int key_size; | |
3132 | int ret; | |
3133 | ||
d44f9b63 | 3134 | req = (struct hclge_rss_config_cmd *)desc.data; |
46a3df9f S |
3135 | |
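	/* The hash key is longer than one descriptor can carry, so write it in HCLGE_RSS_HASH_KEY_NUM-byte chunks; the last chunk holds whatever remains of HCLGE_RSS_KEY_SIZE. */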
3136 | for (key_offset = 0; key_offset < 3; key_offset++) { | |
3137 | hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_GENERIC_CONFIG, | |
3138 | false); | |
3139 | ||
3140 | req->hash_config |= (hfunc & HCLGE_RSS_HASH_ALGO_MASK); | |
3141 | req->hash_config |= (key_offset << HCLGE_RSS_HASH_KEY_OFFSET_B); | |
3142 | ||
3143 | if (key_offset == 2) | |
3144 | key_size = | |
3145 | HCLGE_RSS_KEY_SIZE - HCLGE_RSS_HASH_KEY_NUM * 2; | |
3146 | else | |
3147 | key_size = HCLGE_RSS_HASH_KEY_NUM; | |
3148 | ||
3149 | memcpy(req->hash_key, | |
3150 | key + key_offset * HCLGE_RSS_HASH_KEY_NUM, key_size); | |
3151 | ||
3152 | ret = hclge_cmd_send(&hdev->hw, &desc, 1); | |
3153 | if (ret) { | |
3154 | dev_err(&hdev->pdev->dev, | |
3155 | "Configure RSS config fail, status = %d\n", | |
3156 | ret); | |
3157 | return ret; | |
3158 | } | |
3159 | } | |
3160 | return 0; | |
3161 | } | |
3162 | ||
dcd4ef5e | 3163 | static int hclge_set_rss_indir_table(struct hclge_dev *hdev, const u8 *indir) |
46a3df9f | 3164 | { |
d44f9b63 | 3165 | struct hclge_rss_indirection_table_cmd *req; |
46a3df9f S |
3166 | struct hclge_desc desc; |
3167 | int i, j; | |
3168 | int ret; | |
3169 | ||
d44f9b63 | 3170 | req = (struct hclge_rss_indirection_table_cmd *)desc.data; |
46a3df9f S |
3171 | |
3172 | for (i = 0; i < HCLGE_RSS_CFG_TBL_NUM; i++) { | |
3173 | hclge_cmd_setup_basic_desc | |
3174 | (&desc, HCLGE_OPC_RSS_INDIR_TABLE, false); | |
3175 | ||
a90bb9a5 YL |
3176 | req->start_table_index = |
3177 | cpu_to_le16(i * HCLGE_RSS_CFG_TBL_SIZE); | |
3178 | req->rss_set_bitmap = cpu_to_le16(HCLGE_RSS_SET_BITMAP_MSK); | |
46a3df9f S |
3179 | |
3180 | for (j = 0; j < HCLGE_RSS_CFG_TBL_SIZE; j++) | |
3181 | req->rss_result[j] = | |
3182 | indir[i * HCLGE_RSS_CFG_TBL_SIZE + j]; | |
3183 | ||
3184 | ret = hclge_cmd_send(&hdev->hw, &desc, 1); | |
3185 | if (ret) { | |
3186 | dev_err(&hdev->pdev->dev, | |
3187 | "Configure rss indir table fail, status = %d\n", | |
3188 | ret); | |
3189 | return ret; | |
3190 | } | |
3191 | } | |
3192 | return 0; | |
3193 | } | |
3194 | ||
3195 | static int hclge_set_rss_tc_mode(struct hclge_dev *hdev, u16 *tc_valid, | |
3196 | u16 *tc_size, u16 *tc_offset) | |
3197 | { | |
d44f9b63 | 3198 | struct hclge_rss_tc_mode_cmd *req; |
46a3df9f S |
3199 | struct hclge_desc desc; |
3200 | int ret; | |
3201 | int i; | |
3202 | ||
3203 | hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_TC_MODE, false); | |
d44f9b63 | 3204 | req = (struct hclge_rss_tc_mode_cmd *)desc.data; |
46a3df9f S |
3205 | |
3206 | for (i = 0; i < HCLGE_MAX_TC_NUM; i++) { | |
a90bb9a5 YL |
3207 | u16 mode = 0; |
3208 | ||
ccc23ef3 PL |
3209 | hnae3_set_bit(mode, HCLGE_RSS_TC_VALID_B, (tc_valid[i] & 0x1)); |
3210 | hnae3_set_field(mode, HCLGE_RSS_TC_SIZE_M, | |
3211 | HCLGE_RSS_TC_SIZE_S, tc_size[i]); | |
3212 | hnae3_set_field(mode, HCLGE_RSS_TC_OFFSET_M, | |
3213 | HCLGE_RSS_TC_OFFSET_S, tc_offset[i]); | |
a90bb9a5 YL |
3214 | |
3215 | req->rss_tc_mode[i] = cpu_to_le16(mode); | |
46a3df9f S |
3216 | } |
3217 | ||
3218 | ret = hclge_cmd_send(&hdev->hw, &desc, 1); | |
90415e85 | 3219 | if (ret) |
46a3df9f S |
3220 | dev_err(&hdev->pdev->dev, |
3221 | "Configure rss tc mode fail, status = %d\n", ret); | |
46a3df9f | 3222 | |
90415e85 | 3223 | return ret; |
46a3df9f S |
3224 | } |
3225 | ||
8e4c877d PL |
3226 | static void hclge_get_rss_type(struct hclge_vport *vport) |
3227 | { | |
3228 | if (vport->rss_tuple_sets.ipv4_tcp_en || | |
3229 | vport->rss_tuple_sets.ipv4_udp_en || | |
3230 | vport->rss_tuple_sets.ipv4_sctp_en || | |
3231 | vport->rss_tuple_sets.ipv6_tcp_en || | |
3232 | vport->rss_tuple_sets.ipv6_udp_en || | |
3233 | vport->rss_tuple_sets.ipv6_sctp_en) | |
3234 | vport->nic.kinfo.rss_type = PKT_HASH_TYPE_L4; | |
3235 | else if (vport->rss_tuple_sets.ipv4_fragment_en || | |
3236 | vport->rss_tuple_sets.ipv6_fragment_en) | |
3237 | vport->nic.kinfo.rss_type = PKT_HASH_TYPE_L3; | |
3238 | else | |
3239 | vport->nic.kinfo.rss_type = PKT_HASH_TYPE_NONE; | |
3240 | } | |
3241 | ||
46a3df9f S |
3242 | static int hclge_set_rss_input_tuple(struct hclge_dev *hdev) |
3243 | { | |
d44f9b63 | 3244 | struct hclge_rss_input_tuple_cmd *req; |
46a3df9f S |
3245 | struct hclge_desc desc; |
3246 | int ret; | |
3247 | ||
3248 | hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_INPUT_TUPLE, false); | |
3249 | ||
d44f9b63 | 3250 | req = (struct hclge_rss_input_tuple_cmd *)desc.data; |
637053ef YL |
3251 | |
3252 | /* Get the tuple cfg from pf */ | |
3253 | req->ipv4_tcp_en = hdev->vport[0].rss_tuple_sets.ipv4_tcp_en; | |
3254 | req->ipv4_udp_en = hdev->vport[0].rss_tuple_sets.ipv4_udp_en; | |
3255 | req->ipv4_sctp_en = hdev->vport[0].rss_tuple_sets.ipv4_sctp_en; | |
3256 | req->ipv4_fragment_en = hdev->vport[0].rss_tuple_sets.ipv4_fragment_en; | |
3257 | req->ipv6_tcp_en = hdev->vport[0].rss_tuple_sets.ipv6_tcp_en; | |
3258 | req->ipv6_udp_en = hdev->vport[0].rss_tuple_sets.ipv6_udp_en; | |
3259 | req->ipv6_sctp_en = hdev->vport[0].rss_tuple_sets.ipv6_sctp_en; | |
3260 | req->ipv6_fragment_en = hdev->vport[0].rss_tuple_sets.ipv6_fragment_en; | |
8e4c877d | 3261 | hclge_get_rss_type(&hdev->vport[0]); |
46a3df9f | 3262 | ret = hclge_cmd_send(&hdev->hw, &desc, 1); |
90415e85 | 3263 | if (ret) |
46a3df9f S |
3264 | dev_err(&hdev->pdev->dev, |
3265 | "Configure rss input fail, status = %d\n", ret); | |
90415e85 | 3266 | return ret; |
46a3df9f S |
3267 | } |
3268 | ||
3269 | static int hclge_get_rss(struct hnae3_handle *handle, u32 *indir, | |
3270 | u8 *key, u8 *hfunc) | |
3271 | { | |
3272 | struct hclge_vport *vport = hclge_get_vport(handle); | |
46a3df9f S |
3273 | int i; |
3274 | ||
3275 | /* Get hash algorithm */ | |
6868d695 JS |
3276 | if (hfunc) { |
3277 | switch (vport->rss_algo) { | |
3278 | case HCLGE_RSS_HASH_ALGO_TOEPLITZ: | |
3279 | *hfunc = ETH_RSS_HASH_TOP; | |
3280 | break; | |
3281 | case HCLGE_RSS_HASH_ALGO_SIMPLE: | |
3282 | *hfunc = ETH_RSS_HASH_XOR; | |
3283 | break; | |
3284 | default: | |
3285 | *hfunc = ETH_RSS_HASH_UNKNOWN; | |
3286 | break; | |
3287 | } | |
3288 | } | |
46a3df9f S |
3289 | |
3290 | /* Get the RSS Key required by the user */ | |
3291 | if (key) | |
3292 | memcpy(key, vport->rss_hash_key, HCLGE_RSS_KEY_SIZE); | |
3293 | ||
3294 | /* Get indirect table */ | |
3295 | if (indir) | |
3296 | for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++) | |
3297 | indir[i] = vport->rss_indirection_tbl[i]; | |
3298 | ||
3299 | return 0; | |
3300 | } | |
3301 | ||
3302 | static int hclge_set_rss(struct hnae3_handle *handle, const u32 *indir, | |
3303 | const u8 *key, const u8 hfunc) | |
3304 | { | |
3305 | struct hclge_vport *vport = hclge_get_vport(handle); | |
3306 | struct hclge_dev *hdev = vport->back; | |
3307 | u8 hash_algo; | |
3308 | int ret, i; | |
3309 | ||
3310 | /* Set the RSS hash key if specified by the user */ | |
3311 | if (key) { | |
6868d695 JS |
3312 | switch (hfunc) { |
3313 | case ETH_RSS_HASH_TOP: | |
46a3df9f | 3314 | hash_algo = HCLGE_RSS_HASH_ALGO_TOEPLITZ; |
6868d695 JS |
3315 | break; |
3316 | case ETH_RSS_HASH_XOR: | |
3317 | hash_algo = HCLGE_RSS_HASH_ALGO_SIMPLE; | |
3318 | break; | |
3319 | case ETH_RSS_HASH_NO_CHANGE: | |
3320 | hash_algo = vport->rss_algo; | |
3321 | break; | |
3322 | default: | |
46a3df9f | 3323 | return -EINVAL; |
6868d695 JS |
3324 | } |
3325 | ||
46a3df9f S |
3326 | ret = hclge_set_rss_algo_key(hdev, hash_algo, key); |
3327 | if (ret) | |
3328 | return ret; | |
dcd4ef5e YL |
3329 | |
3330 | /* Update the shadow RSS key with the user specified key */ | |
3331 | memcpy(vport->rss_hash_key, key, HCLGE_RSS_KEY_SIZE); | |
3332 | vport->rss_algo = hash_algo; | |
46a3df9f S |
3333 | } |
3334 | ||
3335 | /* Update the shadow RSS table with user specified qids */ | |
3336 | for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++) | |
3337 | vport->rss_indirection_tbl[i] = indir[i]; | |
3338 | ||
3339 | /* Update the hardware */ | |
dcd4ef5e | 3340 | return hclge_set_rss_indir_table(hdev, vport->rss_indirection_tbl); |
46a3df9f S |
3341 | } |
3342 | ||
f7db940a L |
3343 | static u8 hclge_get_rss_hash_bits(struct ethtool_rxnfc *nfc) |
3344 | { | |
3345 | u8 hash_sets = nfc->data & RXH_L4_B_0_1 ? HCLGE_S_PORT_BIT : 0; | |
3346 | ||
3347 | if (nfc->data & RXH_L4_B_2_3) | |
3348 | hash_sets |= HCLGE_D_PORT_BIT; | |
3349 | else | |
3350 | hash_sets &= ~HCLGE_D_PORT_BIT; | |
3351 | ||
3352 | if (nfc->data & RXH_IP_SRC) | |
3353 | hash_sets |= HCLGE_S_IP_BIT; | |
3354 | else | |
3355 | hash_sets &= ~HCLGE_S_IP_BIT; | |
3356 | ||
3357 | if (nfc->data & RXH_IP_DST) | |
3358 | hash_sets |= HCLGE_D_IP_BIT; | |
3359 | else | |
3360 | hash_sets &= ~HCLGE_D_IP_BIT; | |
3361 | ||
3362 | if (nfc->flow_type == SCTP_V4_FLOW || nfc->flow_type == SCTP_V6_FLOW) | |
3363 | hash_sets |= HCLGE_V_TAG_BIT; | |
3364 | ||
3365 | return hash_sets; | |
3366 | } | |
3367 | ||
3368 | static int hclge_set_rss_tuple(struct hnae3_handle *handle, | |
3369 | struct ethtool_rxnfc *nfc) | |
3370 | { | |
3371 | struct hclge_vport *vport = hclge_get_vport(handle); | |
3372 | struct hclge_dev *hdev = vport->back; | |
3373 | struct hclge_rss_input_tuple_cmd *req; | |
3374 | struct hclge_desc desc; | |
3375 | u8 tuple_sets; | |
3376 | int ret; | |
3377 | ||
3378 | if (nfc->data & ~(RXH_IP_SRC | RXH_IP_DST | | |
3379 | RXH_L4_B_0_1 | RXH_L4_B_2_3)) | |
3380 | return -EINVAL; | |
3381 | ||
3382 | req = (struct hclge_rss_input_tuple_cmd *)desc.data; | |
637053ef | 3383 | hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_INPUT_TUPLE, false); |
f7db940a | 3384 | |
637053ef YL |
3385 | req->ipv4_tcp_en = vport->rss_tuple_sets.ipv4_tcp_en; |
3386 | req->ipv4_udp_en = vport->rss_tuple_sets.ipv4_udp_en; | |
3387 | req->ipv4_sctp_en = vport->rss_tuple_sets.ipv4_sctp_en; | |
3388 | req->ipv4_fragment_en = vport->rss_tuple_sets.ipv4_fragment_en; | |
3389 | req->ipv6_tcp_en = vport->rss_tuple_sets.ipv6_tcp_en; | |
3390 | req->ipv6_udp_en = vport->rss_tuple_sets.ipv6_udp_en; | |
3391 | req->ipv6_sctp_en = vport->rss_tuple_sets.ipv6_sctp_en; | |
3392 | req->ipv6_fragment_en = vport->rss_tuple_sets.ipv6_fragment_en; | |
f7db940a L |
3393 | |
3394 | tuple_sets = hclge_get_rss_hash_bits(nfc); | |
3395 | switch (nfc->flow_type) { | |
3396 | case TCP_V4_FLOW: | |
3397 | req->ipv4_tcp_en = tuple_sets; | |
3398 | break; | |
3399 | case TCP_V6_FLOW: | |
3400 | req->ipv6_tcp_en = tuple_sets; | |
3401 | break; | |
3402 | case UDP_V4_FLOW: | |
3403 | req->ipv4_udp_en = tuple_sets; | |
3404 | break; | |
3405 | case UDP_V6_FLOW: | |
3406 | req->ipv6_udp_en = tuple_sets; | |
3407 | break; | |
3408 | case SCTP_V4_FLOW: | |
3409 | req->ipv4_sctp_en = tuple_sets; | |
3410 | break; | |
3411 | case SCTP_V6_FLOW: | |
3412 | if ((nfc->data & RXH_L4_B_0_1) || | |
3413 | (nfc->data & RXH_L4_B_2_3)) | |
3414 | return -EINVAL; | |
3415 | ||
3416 | req->ipv6_sctp_en = tuple_sets; | |
3417 | break; | |
3418 | case IPV4_FLOW: | |
3419 | req->ipv4_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER; | |
3420 | break; | |
3421 | case IPV6_FLOW: | |
3422 | req->ipv6_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER; | |
3423 | break; | |
3424 | default: | |
3425 | return -EINVAL; | |
3426 | } | |
3427 | ||
3428 | ret = hclge_cmd_send(&hdev->hw, &desc, 1); | |
637053ef | 3429 | if (ret) { |
f7db940a L |
3430 | dev_err(&hdev->pdev->dev, |
3431 | "Set rss tuple fail, status = %d\n", ret); | |
637053ef YL |
3432 | return ret; |
3433 | } | |
f7db940a | 3434 | |
637053ef YL |
3435 | vport->rss_tuple_sets.ipv4_tcp_en = req->ipv4_tcp_en; |
3436 | vport->rss_tuple_sets.ipv4_udp_en = req->ipv4_udp_en; | |
3437 | vport->rss_tuple_sets.ipv4_sctp_en = req->ipv4_sctp_en; | |
3438 | vport->rss_tuple_sets.ipv4_fragment_en = req->ipv4_fragment_en; | |
3439 | vport->rss_tuple_sets.ipv6_tcp_en = req->ipv6_tcp_en; | |
3440 | vport->rss_tuple_sets.ipv6_udp_en = req->ipv6_udp_en; | |
3441 | vport->rss_tuple_sets.ipv6_sctp_en = req->ipv6_sctp_en; | |
3442 | vport->rss_tuple_sets.ipv6_fragment_en = req->ipv6_fragment_en; | |
8e4c877d | 3443 | hclge_get_rss_type(vport); |
637053ef | 3444 | return 0; |
f7db940a L |
3445 | } |
3446 | ||
07d29954 L |
3447 | static int hclge_get_rss_tuple(struct hnae3_handle *handle, |
3448 | struct ethtool_rxnfc *nfc) | |
3449 | { | |
3450 | struct hclge_vport *vport = hclge_get_vport(handle); | |
07d29954 | 3451 | u8 tuple_sets; |
07d29954 L |
3452 | |
3453 | nfc->data = 0; | |
3454 | ||
07d29954 L |
3455 | switch (nfc->flow_type) { |
3456 | case TCP_V4_FLOW: | |
637053ef | 3457 | tuple_sets = vport->rss_tuple_sets.ipv4_tcp_en; |
07d29954 L |
3458 | break; |
3459 | case UDP_V4_FLOW: | |
637053ef | 3460 | tuple_sets = vport->rss_tuple_sets.ipv4_udp_en; |
07d29954 L |
3461 | break; |
3462 | case TCP_V6_FLOW: | |
637053ef | 3463 | tuple_sets = vport->rss_tuple_sets.ipv6_tcp_en; |
07d29954 L |
3464 | break; |
3465 | case UDP_V6_FLOW: | |
637053ef | 3466 | tuple_sets = vport->rss_tuple_sets.ipv6_udp_en; |
07d29954 L |
3467 | break; |
3468 | case SCTP_V4_FLOW: | |
637053ef | 3469 | tuple_sets = vport->rss_tuple_sets.ipv4_sctp_en; |
07d29954 L |
3470 | break; |
3471 | case SCTP_V6_FLOW: | |
637053ef | 3472 | tuple_sets = vport->rss_tuple_sets.ipv6_sctp_en; |
07d29954 L |
3473 | break; |
3474 | case IPV4_FLOW: | |
3475 | case IPV6_FLOW: | |
3476 | tuple_sets = HCLGE_S_IP_BIT | HCLGE_D_IP_BIT; | |
3477 | break; | |
3478 | default: | |
3479 | return -EINVAL; | |
3480 | } | |
3481 | ||
3482 | if (!tuple_sets) | |
3483 | return 0; | |
3484 | ||
3485 | if (tuple_sets & HCLGE_D_PORT_BIT) | |
3486 | nfc->data |= RXH_L4_B_2_3; | |
3487 | if (tuple_sets & HCLGE_S_PORT_BIT) | |
3488 | nfc->data |= RXH_L4_B_0_1; | |
3489 | if (tuple_sets & HCLGE_D_IP_BIT) | |
3490 | nfc->data |= RXH_IP_DST; | |
3491 | if (tuple_sets & HCLGE_S_IP_BIT) | |
3492 | nfc->data |= RXH_IP_SRC; | |
3493 | ||
3494 | return 0; | |
3495 | } | |
3496 | ||
46a3df9f S |
3497 | static int hclge_get_tc_size(struct hnae3_handle *handle) |
3498 | { | |
3499 | struct hclge_vport *vport = hclge_get_vport(handle); | |
3500 | struct hclge_dev *hdev = vport->back; | |
3501 | ||
3502 | return hdev->rss_size_max; | |
3503 | } | |
3504 | ||
77f255c1 | 3505 | int hclge_rss_init_hw(struct hclge_dev *hdev) |
46a3df9f | 3506 | { |
46a3df9f | 3507 | struct hclge_vport *vport = hdev->vport; |
8015bb74 YL |
3508 | u8 *rss_indir = vport[0].rss_indirection_tbl; |
3509 | u16 rss_size = vport[0].alloc_rss_size; | |
3510 | u8 *key = vport[0].rss_hash_key; | |
3511 | u8 hfunc = vport[0].rss_algo; | |
46a3df9f | 3512 | u16 tc_offset[HCLGE_MAX_TC_NUM]; |
46a3df9f S |
3513 | u16 tc_valid[HCLGE_MAX_TC_NUM]; |
3514 | u16 tc_size[HCLGE_MAX_TC_NUM]; | |
8015bb74 YL |
3515 | u16 roundup_size; |
3516 | int i, ret; | |
68ece54e | 3517 | |
46a3df9f S |
3518 | ret = hclge_set_rss_indir_table(hdev, rss_indir); |
3519 | if (ret) | |
8015bb74 | 3520 | return ret; |
46a3df9f | 3521 | |
46a3df9f S |
3522 | ret = hclge_set_rss_algo_key(hdev, hfunc, key); |
3523 | if (ret) | |
8015bb74 | 3524 | return ret; |
46a3df9f S |
3525 | |
3526 | ret = hclge_set_rss_input_tuple(hdev); | |
3527 | if (ret) | |
8015bb74 | 3528 | return ret; |
46a3df9f | 3529 | |
68ece54e YL |
3530 | /* Each TC has the same queue size, and the tc_size set to hardware is |
3531 | * the log2 of the roundup power of two of rss_size; the actual queue | |
3532 | * size is limited by the indirection table. | |
3533 | */ | |
3534 | if (rss_size > HCLGE_RSS_TC_SIZE_7 || rss_size == 0) { | |
3535 | dev_err(&hdev->pdev->dev, | |
3536 | "Configure rss tc size failed, invalid TC_SIZE = %d\n", | |
3537 | rss_size); | |
8015bb74 | 3538 | return -EINVAL; |
68ece54e YL |
3539 | } |
3540 | ||
3541 | roundup_size = roundup_pow_of_two(rss_size); | |
3542 | roundup_size = ilog2(roundup_size); | |
3543 | ||
46a3df9f | 3544 | for (i = 0; i < HCLGE_MAX_TC_NUM; i++) { |
68ece54e | 3545 | tc_valid[i] = 0; |
46a3df9f | 3546 | |
68ece54e YL |
3547 | if (!(hdev->hw_tc_map & BIT(i))) |
3548 | continue; | |
3549 | ||
3550 | tc_valid[i] = 1; | |
3551 | tc_size[i] = roundup_size; | |
3552 | tc_offset[i] = rss_size * i; | |
46a3df9f | 3553 | } |
68ece54e | 3554 | |
8015bb74 YL |
3555 | return hclge_set_rss_tc_mode(hdev, tc_valid, tc_size, tc_offset); |
3556 | } | |
46a3df9f | 3557 | |
8015bb74 YL |
3558 | void hclge_rss_indir_init_cfg(struct hclge_dev *hdev) |
3559 | { | |
3560 | struct hclge_vport *vport = hdev->vport; | |
3561 | int i, j; | |
46a3df9f | 3562 | |
8015bb74 YL |
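	/* Fill each vport's indirection table round-robin over its allocated RSS queues. */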
3563 | for (j = 0; j < hdev->num_vmdq_vport + 1; j++) { |
3564 | for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++) | |
3565 | vport[j].rss_indirection_tbl[i] = | |
3566 | i % vport[j].alloc_rss_size; | |
3567 | } | |
3568 | } | |
3569 | ||
3570 | static void hclge_rss_init_cfg(struct hclge_dev *hdev) | |
3571 | { | |
3572 | struct hclge_vport *vport = hdev->vport; | |
3573 | int i; | |
3574 | ||
8015bb74 YL |
3575 | for (i = 0; i < hdev->num_vmdq_vport + 1; i++) { |
3576 | vport[i].rss_tuple_sets.ipv4_tcp_en = | |
3577 | HCLGE_RSS_INPUT_TUPLE_OTHER; | |
3578 | vport[i].rss_tuple_sets.ipv4_udp_en = | |
3579 | HCLGE_RSS_INPUT_TUPLE_OTHER; | |
3580 | vport[i].rss_tuple_sets.ipv4_sctp_en = | |
3581 | HCLGE_RSS_INPUT_TUPLE_SCTP; | |
3582 | vport[i].rss_tuple_sets.ipv4_fragment_en = | |
3583 | HCLGE_RSS_INPUT_TUPLE_OTHER; | |
3584 | vport[i].rss_tuple_sets.ipv6_tcp_en = | |
3585 | HCLGE_RSS_INPUT_TUPLE_OTHER; | |
3586 | vport[i].rss_tuple_sets.ipv6_udp_en = | |
3587 | HCLGE_RSS_INPUT_TUPLE_OTHER; | |
3588 | vport[i].rss_tuple_sets.ipv6_sctp_en = | |
3589 | HCLGE_RSS_INPUT_TUPLE_SCTP; | |
3590 | vport[i].rss_tuple_sets.ipv6_fragment_en = | |
3591 | HCLGE_RSS_INPUT_TUPLE_OTHER; | |
3592 | ||
3593 | vport[i].rss_algo = HCLGE_RSS_HASH_ALGO_TOEPLITZ; | |
823fe868 FL |
3594 | |
3595 | netdev_rss_key_fill(vport[i].rss_hash_key, HCLGE_RSS_KEY_SIZE); | |
8015bb74 YL |
3596 | } |
3597 | ||
3598 | hclge_rss_indir_init_cfg(hdev); | |
46a3df9f S |
3599 | } |
3600 | ||
63d7e66f SM |
3601 | int hclge_bind_ring_with_vector(struct hclge_vport *vport, |
3602 | int vector_id, bool en, | |
3603 | struct hnae3_ring_chain_node *ring_chain) | |
46a3df9f S |
3604 | { |
3605 | struct hclge_dev *hdev = vport->back; | |
46a3df9f S |
3606 | struct hnae3_ring_chain_node *node; |
3607 | struct hclge_desc desc; | |
63d7e66f SM |
3608 | struct hclge_ctrl_vector_chain_cmd *req |
3609 | = (struct hclge_ctrl_vector_chain_cmd *)desc.data; | |
3610 | enum hclge_cmd_status status; | |
3611 | enum hclge_opcode_type op; | |
3612 | u16 tqp_type_and_id; | |
46a3df9f S |
3613 | int i; |
3614 | ||
63d7e66f SM |
3615 | op = en ? HCLGE_OPC_ADD_RING_TO_VECTOR : HCLGE_OPC_DEL_RING_TO_VECTOR; |
3616 | hclge_cmd_setup_basic_desc(&desc, op, false); | |
46a3df9f S |
3617 | req->int_vector_id = vector_id; |
3618 | ||
3619 | i = 0; | |
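	/* Pack up to HCLGE_VECTOR_ELEMENTS_PER_CMD ring entries into each descriptor; when one fills up, send it and start a fresh descriptor for the remaining rings. */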
3620 | for (node = ring_chain; node; node = node->next) { | |
63d7e66f | 3621 | tqp_type_and_id = le16_to_cpu(req->tqp_type_and_id[i]); |
ccc23ef3 PL |
3622 | hnae3_set_field(tqp_type_and_id, HCLGE_INT_TYPE_M, |
3623 | HCLGE_INT_TYPE_S, | |
3624 | hnae3_get_bit(node->flag, HNAE3_RING_TYPE_B)); | |
3625 | hnae3_set_field(tqp_type_and_id, HCLGE_TQP_ID_M, | |
3626 | HCLGE_TQP_ID_S, node->tqp_index); | |
3627 | hnae3_set_field(tqp_type_and_id, HCLGE_INT_GL_IDX_M, | |
3628 | HCLGE_INT_GL_IDX_S, | |
3629 | hnae3_get_field(node->int_gl_idx, | |
3630 | HNAE3_RING_GL_IDX_M, | |
3631 | HNAE3_RING_GL_IDX_S)); | |
63d7e66f | 3632 | req->tqp_type_and_id[i] = cpu_to_le16(tqp_type_and_id); |
46a3df9f S |
3633 | if (++i >= HCLGE_VECTOR_ELEMENTS_PER_CMD) { |
3634 | req->int_cause_num = HCLGE_VECTOR_ELEMENTS_PER_CMD; | |
63d7e66f | 3635 | req->vfid = vport->vport_id; |
46a3df9f | 3636 | |
63d7e66f SM |
3637 | status = hclge_cmd_send(&hdev->hw, &desc, 1); |
3638 | if (status) { | |
46a3df9f S |
3639 | dev_err(&hdev->pdev->dev, |
3640 | "Map TQP fail, status is %d.\n", | |
63d7e66f SM |
3641 | status); |
3642 | return -EIO; | |
46a3df9f S |
3643 | } |
3644 | i = 0; | |
3645 | ||
3646 | hclge_cmd_setup_basic_desc(&desc, | |
63d7e66f | 3647 | op, |
46a3df9f S |
3648 | false); |
3649 | req->int_vector_id = vector_id; | |
3650 | } | |
3651 | } | |
3652 | ||
3653 | if (i > 0) { | |
3654 | req->int_cause_num = i; | |
63d7e66f SM |
3655 | req->vfid = vport->vport_id; |
3656 | status = hclge_cmd_send(&hdev->hw, &desc, 1); | |
3657 | if (status) { | |
46a3df9f | 3658 | dev_err(&hdev->pdev->dev, |
63d7e66f SM |
3659 | "Map TQP fail, status is %d.\n", status); |
3660 | return -EIO; | |
46a3df9f S |
3661 | } |
3662 | } | |
3663 | ||
3664 | return 0; | |
3665 | } | |
3666 | ||
63d7e66f SM |
3667 | static int hclge_map_ring_to_vector(struct hnae3_handle *handle, |
3668 | int vector, | |
3669 | struct hnae3_ring_chain_node *ring_chain) | |
46a3df9f S |
3670 | { |
3671 | struct hclge_vport *vport = hclge_get_vport(handle); | |
3672 | struct hclge_dev *hdev = vport->back; | |
3673 | int vector_id; | |
3674 | ||
3675 | vector_id = hclge_get_vector_index(hdev, vector); | |
3676 | if (vector_id < 0) { | |
3677 | dev_err(&hdev->pdev->dev, | |
63d7e66f | 3678 | "Get vector index fail. vector_id =%d\n", vector_id); |
46a3df9f S |
3679 | return vector_id; |
3680 | } | |
3681 | ||
63d7e66f | 3682 | return hclge_bind_ring_with_vector(vport, vector_id, true, ring_chain); |
46a3df9f S |
3683 | } |
3684 | ||
63d7e66f SM |
3685 | static int hclge_unmap_ring_frm_vector(struct hnae3_handle *handle, |
3686 | int vector, | |
3687 | struct hnae3_ring_chain_node *ring_chain) | |
46a3df9f S |
3688 | { |
3689 | struct hclge_vport *vport = hclge_get_vport(handle); | |
3690 | struct hclge_dev *hdev = vport->back; | |
63d7e66f | 3691 | int vector_id, ret; |
46a3df9f | 3692 | |
f9637cc2 PL |
3693 | if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state)) |
3694 | return 0; | |
3695 | ||
46a3df9f S |
3696 | vector_id = hclge_get_vector_index(hdev, vector); |
3697 | if (vector_id < 0) { | |
3698 | dev_err(&handle->pdev->dev, | |
3699 | "Get vector index fail. ret =%d\n", vector_id); | |
3700 | return vector_id; | |
3701 | } | |
3702 | ||
63d7e66f | 3703 | ret = hclge_bind_ring_with_vector(vport, vector_id, false, ring_chain); |
7412200c | 3704 | if (ret) |
63d7e66f SM |
3705 | dev_err(&handle->pdev->dev, |
3706 | "Unmap ring from vector fail. vectorid=%d, ret =%d\n", | |
3707 | vector_id, | |
3708 | ret); | |
46a3df9f | 3709 | |
7412200c | 3710 | return ret; |
46a3df9f S |
3711 | } |
3712 | ||
3713 | int hclge_cmd_set_promisc_mode(struct hclge_dev *hdev, | |
3714 | struct hclge_promisc_param *param) | |
3715 | { | |
d44f9b63 | 3716 | struct hclge_promisc_cfg_cmd *req; |
46a3df9f S |
3717 | struct hclge_desc desc; |
3718 | int ret; | |
3719 | ||
3720 | hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_PROMISC_MODE, false); | |
3721 | ||
d44f9b63 | 3722 | req = (struct hclge_promisc_cfg_cmd *)desc.data; |
46a3df9f | 3723 | req->vf_id = param->vf_id; |
4771e104 PL |
3724 | |
3725 | /* HCLGE_PROMISC_TX_EN_B and HCLGE_PROMISC_RX_EN_B are not supported on | |
3726 | * pdev revision(0x20); newer revisions support them. The | |
3727 | * values of these two fields will not cause an error when the driver | |
3728 | * sends the command to firmware on revision(0x20). | |
3729 | */ | |
3730 | req->flag = (param->enable << HCLGE_PROMISC_EN_B) | | |
3731 | HCLGE_PROMISC_TX_EN_B | HCLGE_PROMISC_RX_EN_B; | |
46a3df9f S |
3732 | |
3733 | ret = hclge_cmd_send(&hdev->hw, &desc, 1); | |
90415e85 | 3734 | if (ret) |
46a3df9f S |
3735 | dev_err(&hdev->pdev->dev, |
3736 | "Set promisc mode fail, status is %d.\n", ret); | |
90415e85 JS |
3737 | |
3738 | return ret; | |
46a3df9f S |
3739 | } |
3740 | ||
3741 | void hclge_promisc_param_init(struct hclge_promisc_param *param, bool en_uc, | |
3742 | bool en_mc, bool en_bc, int vport_id) | |
3743 | { | |
3744 | if (!param) | |
3745 | return; | |
3746 | ||
3747 | memset(param, 0, sizeof(struct hclge_promisc_param)); | |
3748 | if (en_uc) | |
3749 | param->enable = HCLGE_PROMISC_EN_UC; | |
3750 | if (en_mc) | |
3751 | param->enable |= HCLGE_PROMISC_EN_MC; | |
3752 | if (en_bc) | |
3753 | param->enable |= HCLGE_PROMISC_EN_BC; | |
3754 | param->vf_id = vport_id; | |
3755 | } | |
3756 | ||
abe62a63 HT |
3757 | static int hclge_set_promisc_mode(struct hnae3_handle *handle, bool en_uc_pmc, |
3758 | bool en_mc_pmc) | |
46a3df9f S |
3759 | { |
3760 | struct hclge_vport *vport = hclge_get_vport(handle); | |
3761 | struct hclge_dev *hdev = vport->back; | |
3762 | struct hclge_promisc_param param; | |
3763 | ||
e8600a3d PL |
3764 | hclge_promisc_param_init(¶m, en_uc_pmc, en_mc_pmc, true, |
3765 | vport->vport_id); | |
abe62a63 | 3766 | return hclge_cmd_set_promisc_mode(hdev, ¶m); |
46a3df9f S |
3767 | } |
3768 | ||
10a954bc JS |
3769 | static int hclge_get_fd_mode(struct hclge_dev *hdev, u8 *fd_mode) |
3770 | { | |
3771 | struct hclge_get_fd_mode_cmd *req; | |
3772 | struct hclge_desc desc; | |
3773 | int ret; | |
3774 | ||
3775 | hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_MODE_CTRL, true); | |
3776 | ||
3777 | req = (struct hclge_get_fd_mode_cmd *)desc.data; | |
3778 | ||
3779 | ret = hclge_cmd_send(&hdev->hw, &desc, 1); | |
3780 | if (ret) { | |
3781 | dev_err(&hdev->pdev->dev, "get fd mode fail, ret=%d\n", ret); | |
3782 | return ret; | |
3783 | } | |
3784 | ||
3785 | *fd_mode = req->mode; | |
3786 | ||
3787 | return ret; | |
3788 | } | |
3789 | ||
3790 | static int hclge_get_fd_allocation(struct hclge_dev *hdev, | |
3791 | u32 *stage1_entry_num, | |
3792 | u32 *stage2_entry_num, | |
3793 | u16 *stage1_counter_num, | |
3794 | u16 *stage2_counter_num) | |
3795 | { | |
3796 | struct hclge_get_fd_allocation_cmd *req; | |
3797 | struct hclge_desc desc; | |
3798 | int ret; | |
3799 | ||
3800 | hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_GET_ALLOCATION, true); | |
3801 | ||
3802 | req = (struct hclge_get_fd_allocation_cmd *)desc.data; | |
3803 | ||
3804 | ret = hclge_cmd_send(&hdev->hw, &desc, 1); | |
3805 | if (ret) { | |
3806 | dev_err(&hdev->pdev->dev, "query fd allocation fail, ret=%d\n", | |
3807 | ret); | |
3808 | return ret; | |
3809 | } | |
3810 | ||
3811 | *stage1_entry_num = le32_to_cpu(req->stage1_entry_num); | |
3812 | *stage2_entry_num = le32_to_cpu(req->stage2_entry_num); | |
3813 | *stage1_counter_num = le16_to_cpu(req->stage1_counter_num); | |
3814 | *stage2_counter_num = le16_to_cpu(req->stage2_counter_num); | |
3815 | ||
3816 | return ret; | |
3817 | } | |
3818 | ||
3819 | static int hclge_set_fd_key_config(struct hclge_dev *hdev, int stage_num) | |
3820 | { | |
3821 | struct hclge_set_fd_key_config_cmd *req; | |
3822 | struct hclge_fd_key_cfg *stage; | |
3823 | struct hclge_desc desc; | |
3824 | int ret; | |
3825 | ||
3826 | hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_KEY_CONFIG, false); | |
3827 | ||
3828 | req = (struct hclge_set_fd_key_config_cmd *)desc.data; | |
3829 | stage = &hdev->fd_cfg.key_cfg[stage_num]; | |
3830 | req->stage = stage_num; | |
3831 | req->key_select = stage->key_sel; | |
3832 | req->inner_sipv6_word_en = stage->inner_sipv6_word_en; | |
3833 | req->inner_dipv6_word_en = stage->inner_dipv6_word_en; | |
3834 | req->outer_sipv6_word_en = stage->outer_sipv6_word_en; | |
3835 | req->outer_dipv6_word_en = stage->outer_dipv6_word_en; | |
3836 | req->tuple_mask = cpu_to_le32(~stage->tuple_active); | |
3837 | req->meta_data_mask = cpu_to_le32(~stage->meta_data_active); | |
3838 | ||
3839 | ret = hclge_cmd_send(&hdev->hw, &desc, 1); | |
3840 | if (ret) | |
3841 | dev_err(&hdev->pdev->dev, "set fd key fail, ret=%d\n", ret); | |
3842 | ||
3843 | return ret; | |
3844 | } | |
3845 | ||
3846 | static int hclge_init_fd_config(struct hclge_dev *hdev) | |
3847 | { | |
3848 | #define LOW_2_WORDS 0x03 | |
3849 | struct hclge_fd_key_cfg *key_cfg; | |
3850 | int ret; | |
3851 | ||
3852 | if (!hnae3_dev_fd_supported(hdev)) | |
3853 | return 0; | |
3854 | ||
3855 | ret = hclge_get_fd_mode(hdev, &hdev->fd_cfg.fd_mode); | |
3856 | if (ret) | |
3857 | return ret; | |
3858 | ||
3859 | switch (hdev->fd_cfg.fd_mode) { | |
3860 | case HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1: | |
3861 | hdev->fd_cfg.max_key_length = MAX_KEY_LENGTH; | |
3862 | break; | |
3863 | case HCLGE_FD_MODE_DEPTH_4K_WIDTH_200B_STAGE_1: | |
3864 | hdev->fd_cfg.max_key_length = MAX_KEY_LENGTH / 2; | |
3865 | break; | |
3866 | default: | |
3867 | dev_err(&hdev->pdev->dev, | |
3868 | "Unsupported flow director mode %d\n", | |
3869 | hdev->fd_cfg.fd_mode); | |
3870 | return -EOPNOTSUPP; | |
3871 | } | |
3872 | ||
3873 | hdev->fd_cfg.fd_en = true; | |
3874 | hdev->fd_cfg.proto_support = | |
3875 | TCP_V4_FLOW | UDP_V4_FLOW | SCTP_V4_FLOW | TCP_V6_FLOW | | |
3876 | UDP_V6_FLOW | SCTP_V6_FLOW | IPV4_USER_FLOW | IPV6_USER_FLOW; | |
3877 | key_cfg = &hdev->fd_cfg.key_cfg[HCLGE_FD_STAGE_1]; | |
3878 | key_cfg->key_sel = HCLGE_FD_KEY_BASE_ON_TUPLE; |
3879 | key_cfg->inner_sipv6_word_en = LOW_2_WORDS; | |
3880 | key_cfg->inner_dipv6_word_en = LOW_2_WORDS; | |
3881 | key_cfg->outer_sipv6_word_en = 0; | |
3882 | key_cfg->outer_dipv6_word_en = 0; | |
3883 | ||
3884 | key_cfg->tuple_active = BIT(INNER_VLAN_TAG_FST) | BIT(INNER_ETH_TYPE) | | |
3885 | BIT(INNER_IP_PROTO) | BIT(INNER_IP_TOS) | | |
3886 | BIT(INNER_SRC_IP) | BIT(INNER_DST_IP) | | |
3887 | BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT); | |
3888 | ||
3889 | /* If the max 400-bit key is used, tuples for ether type can also be supported */ | |
3890 | if (hdev->fd_cfg.max_key_length == MAX_KEY_LENGTH) { | |
3891 | hdev->fd_cfg.proto_support |= ETHER_FLOW; | |
3892 | key_cfg->tuple_active |= | |
3893 | BIT(INNER_DST_MAC) | BIT(INNER_SRC_MAC); | |
3894 | } | |
3895 | ||
3896 | /* roce_type is used to filter roce frames; |
3897 | * dst_vport is used to specify the vport the rule applies to | |
3898 | */ | |
3899 | key_cfg->meta_data_active = BIT(ROCE_TYPE) | BIT(DST_VPORT); | |
3900 | ||
3901 | ret = hclge_get_fd_allocation(hdev, | |
3902 | &hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1], | |
3903 | &hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_2], | |
3904 | &hdev->fd_cfg.cnt_num[HCLGE_FD_STAGE_1], | |
3905 | &hdev->fd_cfg.cnt_num[HCLGE_FD_STAGE_2]); | |
3906 | if (ret) | |
3907 | return ret; | |
3908 | ||
3909 | return hclge_set_fd_key_config(hdev, HCLGE_FD_STAGE_1); | |
3910 | } | |
3911 | ||
7b829126 JS |
3912 | static int hclge_fd_tcam_config(struct hclge_dev *hdev, u8 stage, bool sel_x, |
3913 | int loc, u8 *key, bool is_add) | |
3914 | { | |
3915 | struct hclge_fd_tcam_config_1_cmd *req1; | |
3916 | struct hclge_fd_tcam_config_2_cmd *req2; | |
3917 | struct hclge_fd_tcam_config_3_cmd *req3; | |
3918 | struct hclge_desc desc[3]; | |
3919 | int ret; | |
3920 | ||
3921 | hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_FD_TCAM_OP, false); | |
3922 | desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT); | |
3923 | hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_FD_TCAM_OP, false); | |
3924 | desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT); | |
3925 | hclge_cmd_setup_basic_desc(&desc[2], HCLGE_OPC_FD_TCAM_OP, false); | |
3926 | ||
3927 | req1 = (struct hclge_fd_tcam_config_1_cmd *)desc[0].data; | |
3928 | req2 = (struct hclge_fd_tcam_config_2_cmd *)desc[1].data; | |
3929 | req3 = (struct hclge_fd_tcam_config_3_cmd *)desc[2].data; | |
3930 | ||
3931 | req1->stage = stage; | |
3932 | req1->xy_sel = sel_x ? 1 : 0; | |
3933 | hnae3_set_bit(req1->port_info, HCLGE_FD_EPORT_SW_EN_B, 0); | |
3934 | req1->index = cpu_to_le32(loc); | |
3935 | req1->entry_vld = sel_x ? is_add : 0; | |
3936 | ||
3937 | if (key) { | |
3938 | memcpy(req1->tcam_data, &key[0], sizeof(req1->tcam_data)); | |
3939 | memcpy(req2->tcam_data, &key[sizeof(req1->tcam_data)], | |
3940 | sizeof(req2->tcam_data)); | |
3941 | memcpy(req3->tcam_data, &key[sizeof(req1->tcam_data) + | |
3942 | sizeof(req2->tcam_data)], sizeof(req3->tcam_data)); | |
3943 | } | |
3944 | ||
3945 | ret = hclge_cmd_send(&hdev->hw, desc, 3); | |
3946 | if (ret) | |
3947 | dev_err(&hdev->pdev->dev, | |
3948 | "config tcam key fail, ret=%d\n", | |
3949 | ret); | |
3950 | ||
3951 | return ret; | |
3952 | } | |
3953 | ||
3954 | static int hclge_fd_ad_config(struct hclge_dev *hdev, u8 stage, int loc, | |
3955 | struct hclge_fd_ad_data *action) | |
3956 | { | |
3957 | struct hclge_fd_ad_config_cmd *req; | |
3958 | struct hclge_desc desc; | |
3959 | u64 ad_data = 0; | |
3960 | int ret; | |
3961 | ||
3962 | hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_AD_OP, false); | |
3963 | ||
3964 | req = (struct hclge_fd_ad_config_cmd *)desc.data; | |
3965 | req->index = cpu_to_le32(loc); | |
3966 | req->stage = stage; | |
3967 | ||
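	/* Build the 64-bit action data: the rule id fields end up in the upper 32 bits, the drop/queue/counter fields in the lower 32 bits. */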
3968 | hnae3_set_bit(ad_data, HCLGE_FD_AD_WR_RULE_ID_B, | |
3969 | action->write_rule_id_to_bd); | |
3970 | hnae3_set_field(ad_data, HCLGE_FD_AD_RULE_ID_M, HCLGE_FD_AD_RULE_ID_S, | |
3971 | action->rule_id); | |
3972 | ad_data <<= 32; | |
3973 | hnae3_set_bit(ad_data, HCLGE_FD_AD_DROP_B, action->drop_packet); | |
3974 | hnae3_set_bit(ad_data, HCLGE_FD_AD_DIRECT_QID_B, | |
3975 | action->forward_to_direct_queue); | |
3976 | hnae3_set_field(ad_data, HCLGE_FD_AD_QID_M, HCLGE_FD_AD_QID_S, | |
3977 | action->queue_id); | |
3978 | hnae3_set_bit(ad_data, HCLGE_FD_AD_USE_COUNTER_B, action->use_counter); | |
3979 | hnae3_set_field(ad_data, HCLGE_FD_AD_COUNTER_NUM_M, | |
3980 | HCLGE_FD_AD_COUNTER_NUM_S, action->counter_id); | |
3981 | hnae3_set_bit(ad_data, HCLGE_FD_AD_NXT_STEP_B, action->use_next_stage); | |
3982 | hnae3_set_field(ad_data, HCLGE_FD_AD_NXT_KEY_M, HCLGE_FD_AD_NXT_KEY_S, | |
3983 | action->counter_id); | |
3984 | ||
3985 | req->ad_data = cpu_to_le64(ad_data); | |
3986 | ret = hclge_cmd_send(&hdev->hw, &desc, 1); | |
3987 | if (ret) | |
3988 | dev_err(&hdev->pdev->dev, "fd ad config fail, ret=%d\n", ret); | |
3989 | ||
3990 | return ret; | |
3991 | } | |
3992 | ||
3993 | static bool hclge_fd_convert_tuple(u32 tuple_bit, u8 *key_x, u8 *key_y, | |
3994 | struct hclge_fd_rule *rule) | |
3995 | { | |
3996 | u16 tmp_x_s, tmp_y_s; | |
3997 | u32 tmp_x_l, tmp_y_l; | |
3998 | int i; | |
3999 | ||
4000 | if (rule->unused_tuple & tuple_bit) | |
4001 | return true; | |
4002 | ||
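	/* Convert each tuple's value/mask pair into the x/y key representation used by the TCAM via calc_x()/calc_y(). */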
4003 | switch (tuple_bit) { | |
4004 | case 0: | |
4005 | return false; | |
4006 | case BIT(INNER_DST_MAC): | |
4007 | for (i = 0; i < 6; i++) { | |
4008 | calc_x(key_x[5 - i], rule->tuples.dst_mac[i], | |
4009 | rule->tuples_mask.dst_mac[i]); | |
4010 | calc_y(key_y[5 - i], rule->tuples.dst_mac[i], | |
4011 | rule->tuples_mask.dst_mac[i]); | |
4012 | } | |
4013 | ||
4014 | return true; | |
4015 | case BIT(INNER_SRC_MAC): | |
4016 | for (i = 0; i < 6; i++) { | |
4017 | calc_x(key_x[5 - i], rule->tuples.src_mac[i], | |
4018 | rule->tuples_mask.src_mac[i]); | |
4019 | calc_y(key_y[5 - i], rule->tuples.src_mac[i], | |
4020 | rule->tuples_mask.src_mac[i]); | |
4021 | } | |
4022 | ||
4023 | return true; | |
4024 | case BIT(INNER_VLAN_TAG_FST): | |
4025 | calc_x(tmp_x_s, rule->tuples.vlan_tag1, | |
4026 | rule->tuples_mask.vlan_tag1); | |
4027 | calc_y(tmp_y_s, rule->tuples.vlan_tag1, | |
4028 | rule->tuples_mask.vlan_tag1); | |
4029 | *(__le16 *)key_x = cpu_to_le16(tmp_x_s); | |
4030 | *(__le16 *)key_y = cpu_to_le16(tmp_y_s); | |
4031 | ||
4032 | return true; | |
4033 | case BIT(INNER_ETH_TYPE): | |
4034 | calc_x(tmp_x_s, rule->tuples.ether_proto, | |
4035 | rule->tuples_mask.ether_proto); | |
4036 | calc_y(tmp_y_s, rule->tuples.ether_proto, | |
4037 | rule->tuples_mask.ether_proto); | |
4038 | *(__le16 *)key_x = cpu_to_le16(tmp_x_s); | |
4039 | *(__le16 *)key_y = cpu_to_le16(tmp_y_s); | |
4040 | ||
4041 | return true; | |
4042 | case BIT(INNER_IP_TOS): | |
4043 | calc_x(*key_x, rule->tuples.ip_tos, rule->tuples_mask.ip_tos); | |
4044 | calc_y(*key_y, rule->tuples.ip_tos, rule->tuples_mask.ip_tos); | |
4045 | ||
4046 | return true; | |
4047 | case BIT(INNER_IP_PROTO): | |
4048 | calc_x(*key_x, rule->tuples.ip_proto, | |
4049 | rule->tuples_mask.ip_proto); | |
4050 | calc_y(*key_y, rule->tuples.ip_proto, | |
4051 | rule->tuples_mask.ip_proto); | |
4052 | ||
4053 | return true; | |
4054 | case BIT(INNER_SRC_IP): | |
4055 | calc_x(tmp_x_l, rule->tuples.src_ip[3], | |
4056 | rule->tuples_mask.src_ip[3]); | |
4057 | calc_y(tmp_y_l, rule->tuples.src_ip[3], | |
4058 | rule->tuples_mask.src_ip[3]); | |
4059 | *(__le32 *)key_x = cpu_to_le32(tmp_x_l); | |
4060 | *(__le32 *)key_y = cpu_to_le32(tmp_y_l); | |
4061 | ||
4062 | return true; | |
4063 | case BIT(INNER_DST_IP): | |
4064 | calc_x(tmp_x_l, rule->tuples.dst_ip[3], | |
4065 | rule->tuples_mask.dst_ip[3]); | |
4066 | calc_y(tmp_y_l, rule->tuples.dst_ip[3], | |
4067 | rule->tuples_mask.dst_ip[3]); | |
4068 | *(__le32 *)key_x = cpu_to_le32(tmp_x_l); | |
4069 | *(__le32 *)key_y = cpu_to_le32(tmp_y_l); | |
4070 | ||
4071 | return true; | |
4072 | case BIT(INNER_SRC_PORT): | |
4073 | calc_x(tmp_x_s, rule->tuples.src_port, | |
4074 | rule->tuples_mask.src_port); | |
4075 | calc_y(tmp_y_s, rule->tuples.src_port, | |
4076 | rule->tuples_mask.src_port); | |
4077 | *(__le16 *)key_x = cpu_to_le16(tmp_x_s); | |
4078 | *(__le16 *)key_y = cpu_to_le16(tmp_y_s); | |
4079 | ||
4080 | return true; | |
4081 | case BIT(INNER_DST_PORT): | |
4082 | calc_x(tmp_x_s, rule->tuples.dst_port, | |
4083 | rule->tuples_mask.dst_port); | |
4084 | calc_y(tmp_y_s, rule->tuples.dst_port, | |
4085 | rule->tuples_mask.dst_port); | |
4086 | *(__le16 *)key_x = cpu_to_le16(tmp_x_s); | |
4087 | *(__le16 *)key_y = cpu_to_le16(tmp_y_s); | |
4088 | ||
4089 | return true; | |
4090 | default: | |
4091 | return false; | |
4092 | } | |
4093 | } | |
4094 | ||
4095 | static u32 hclge_get_port_number(enum HLCGE_PORT_TYPE port_type, u8 pf_id, | |
4096 | u8 vf_id, u8 network_port_id) | |
4097 | { | |
4098 | u32 port_number = 0; | |
4099 | ||
4100 | if (port_type == HOST_PORT) { | |
4101 | hnae3_set_field(port_number, HCLGE_PF_ID_M, HCLGE_PF_ID_S, | |
4102 | pf_id); | |
4103 | hnae3_set_field(port_number, HCLGE_VF_ID_M, HCLGE_VF_ID_S, | |
4104 | vf_id); | |
4105 | hnae3_set_bit(port_number, HCLGE_PORT_TYPE_B, HOST_PORT); | |
4106 | } else { | |
4107 | hnae3_set_field(port_number, HCLGE_NETWORK_PORT_ID_M, | |
4108 | HCLGE_NETWORK_PORT_ID_S, network_port_id); | |
4109 | hnae3_set_bit(port_number, HCLGE_PORT_TYPE_B, NETWORK_PORT); | |
4110 | } | |
4111 | ||
4112 | return port_number; | |
4113 | } | |
4114 | ||
4115 | static void hclge_fd_convert_meta_data(struct hclge_fd_key_cfg *key_cfg, | |
4116 | __le32 *key_x, __le32 *key_y, | |
4117 | struct hclge_fd_rule *rule) | |
4118 | { | |
4119 | u32 tuple_bit, meta_data = 0, tmp_x, tmp_y, port_number; | |
4120 | u8 cur_pos = 0, tuple_size, shift_bits; | |
4121 | int i; | |
4122 | ||
4123 | for (i = 0; i < MAX_META_DATA; i++) { | |
4124 | tuple_size = meta_data_key_info[i].key_length; | |
4125 | tuple_bit = key_cfg->meta_data_active & BIT(i); | |
4126 | ||
4127 | switch (tuple_bit) { | |
4128 | case BIT(ROCE_TYPE): | |
4129 | hnae3_set_bit(meta_data, cur_pos, NIC_PACKET); | |
4130 | cur_pos += tuple_size; | |
4131 | break; | |
4132 | case BIT(DST_VPORT): | |
4133 | port_number = hclge_get_port_number(HOST_PORT, 0, | |
4134 | rule->vf_id, 0); | |
4135 | hnae3_set_field(meta_data, | |
4136 | GENMASK(cur_pos + tuple_size - 1, cur_pos), | |
4137 | cur_pos, port_number); | |
4138 | cur_pos += tuple_size; | |
4139 | break; | |
4140 | default: | |
4141 | break; | |
4142 | } | |
4143 | } | |
4144 | ||
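	/* The meta data sits at the most-significant end of the 32-bit key word, so shift the accumulated fields up before storing them. */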
4145 | calc_x(tmp_x, meta_data, 0xFFFFFFFF); | |
4146 | calc_y(tmp_y, meta_data, 0xFFFFFFFF); | |
4147 | shift_bits = sizeof(meta_data) * 8 - cur_pos; | |
4148 | ||
4149 | *key_x = cpu_to_le32(tmp_x << shift_bits); | |
4150 | *key_y = cpu_to_le32(tmp_y << shift_bits); | |
4151 | } | |
4152 | ||
4153 | /* A complete key is the combination of the meta data key and the tuple key. |
4154 | * The meta data key is stored in the MSB region, and the tuple key is stored in | |
4155 | * the LSB region; unused bits are filled with 0. | |
4156 | */ | |
4157 | static int hclge_config_key(struct hclge_dev *hdev, u8 stage, | |
4158 | struct hclge_fd_rule *rule) | |
4159 | { | |
4160 | struct hclge_fd_key_cfg *key_cfg = &hdev->fd_cfg.key_cfg[stage]; | |
4161 | u8 key_x[MAX_KEY_BYTES], key_y[MAX_KEY_BYTES]; | |
4162 | u8 *cur_key_x, *cur_key_y; | |
4163 | int i, ret, tuple_size; | |
4164 | u8 meta_data_region; | |
4165 | ||
4166 | memset(key_x, 0, sizeof(key_x)); | |
4167 | memset(key_y, 0, sizeof(key_y)); | |
4168 | cur_key_x = key_x; | |
4169 | cur_key_y = key_y; | |
4170 | ||
4171 | for (i = 0; i < MAX_TUPLE; i++) { |
4172 | bool tuple_valid; | |
4173 | u32 check_tuple; | |
4174 | ||
4175 | tuple_size = tuple_key_info[i].key_length / 8; | |
4176 | check_tuple = key_cfg->tuple_active & BIT(i); | |
4177 | ||
4178 | tuple_valid = hclge_fd_convert_tuple(check_tuple, cur_key_x, | |
4179 | cur_key_y, rule); | |
4180 | if (tuple_valid) { | |
4181 | cur_key_x += tuple_size; | |
4182 | cur_key_y += tuple_size; | |
4183 | } | |
4184 | } | |
4185 | ||
4186 | meta_data_region = hdev->fd_cfg.max_key_length / 8 - | |
4187 | MAX_META_DATA_LENGTH / 8; | |
4188 | ||
4189 | hclge_fd_convert_meta_data(key_cfg, | |
4190 | (__le32 *)(key_x + meta_data_region), | |
4191 | (__le32 *)(key_y + meta_data_region), | |
4192 | rule); | |
4193 | ||
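	/* Write the y-half of the TCAM entry first, then the x-half; only the x-half write marks the entry as valid. */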
4194 | ret = hclge_fd_tcam_config(hdev, stage, false, rule->location, key_y, | |
4195 | true); | |
4196 | if (ret) { | |
4197 | dev_err(&hdev->pdev->dev, | |
4198 | "fd key_y config fail, loc=%d, ret=%d\n", | |
4199 | rule->location, ret); | |
4200 | return ret; | |
4201 | } | |
4202 | ||
4203 | ret = hclge_fd_tcam_config(hdev, stage, true, rule->location, key_x, | |
4204 | true); | |
4205 | if (ret) | |
4206 | dev_err(&hdev->pdev->dev, | |
4207 | "fd key_x config fail, loc=%d, ret=%d\n", | |
4208 | rule->location, ret); | |
4209 | return ret; | |
4210 | } | |
4211 | ||
4212 | static int hclge_config_action(struct hclge_dev *hdev, u8 stage, | |
4213 | struct hclge_fd_rule *rule) | |
4214 | { | |
4215 | struct hclge_fd_ad_data ad_data; | |
4216 | ||
4217 | ad_data.ad_id = rule->location; | |
4218 | ||
4219 | if (rule->action == HCLGE_FD_ACTION_DROP_PACKET) { | |
4220 | ad_data.drop_packet = true; | |
4221 | ad_data.forward_to_direct_queue = false; | |
4222 | ad_data.queue_id = 0; | |
4223 | } else { | |
4224 | ad_data.drop_packet = false; | |
4225 | ad_data.forward_to_direct_queue = true; | |
4226 | ad_data.queue_id = rule->queue_id; | |
4227 | } | |
4228 | ||
4229 | ad_data.use_counter = false; | |
4230 | ad_data.counter_id = 0; | |
4231 | ||
4232 | ad_data.use_next_stage = false; | |
4233 | ad_data.next_input_key = 0; | |
4234 | ||
4235 | ad_data.write_rule_id_to_bd = true; | |
4236 | ad_data.rule_id = rule->location; | |
4237 | ||
4238 | return hclge_fd_ad_config(hdev, stage, ad_data.ad_id, &ad_data); | |
4239 | } | |
4240 | ||
3ca8e27c JS |
4241 | static int hclge_fd_check_spec(struct hclge_dev *hdev, |
4242 | struct ethtool_rx_flow_spec *fs, u32 *unused) | |
4243 | { | |
4244 | struct ethtool_tcpip4_spec *tcp_ip4_spec; | |
4245 | struct ethtool_usrip4_spec *usr_ip4_spec; | |
4246 | struct ethtool_tcpip6_spec *tcp_ip6_spec; | |
4247 | struct ethtool_usrip6_spec *usr_ip6_spec; | |
4248 | struct ethhdr *ether_spec; | |
4249 | ||
4250 | if (fs->location >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]) | |
4251 | return -EINVAL; | |
4252 | ||
4253 | if (!(fs->flow_type & hdev->fd_cfg.proto_support)) | |
4254 | return -EOPNOTSUPP; | |
4255 | ||
4256 | if ((fs->flow_type & FLOW_EXT) && | |
4257 | (fs->h_ext.data[0] != 0 || fs->h_ext.data[1] != 0)) { | |
4258 | dev_err(&hdev->pdev->dev, "user-def bytes are not supported\n"); | |
4259 | return -EOPNOTSUPP; | |
4260 | } | |
4261 | ||
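	/* Mark every tuple the user left unspecified as unused so it is excluded from the flow director key. */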
4262 | switch (fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) { | |
4263 | case SCTP_V4_FLOW: | |
4264 | case TCP_V4_FLOW: | |
4265 | case UDP_V4_FLOW: | |
4266 | tcp_ip4_spec = &fs->h_u.tcp_ip4_spec; | |
4267 | *unused |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC); | |
4268 | ||
4269 | if (!tcp_ip4_spec->ip4src) | |
4270 | *unused |= BIT(INNER_SRC_IP); | |
4271 | ||
4272 | if (!tcp_ip4_spec->ip4dst) | |
4273 | *unused |= BIT(INNER_DST_IP); | |
4274 | ||
4275 | if (!tcp_ip4_spec->psrc) | |
4276 | *unused |= BIT(INNER_SRC_PORT); | |
4277 | ||
4278 | if (!tcp_ip4_spec->pdst) | |
4279 | *unused |= BIT(INNER_DST_PORT); | |
4280 | ||
4281 | if (!tcp_ip4_spec->tos) | |
4282 | *unused |= BIT(INNER_IP_TOS); | |
4283 | ||
4284 | break; | |
4285 | case IP_USER_FLOW: | |
4286 | usr_ip4_spec = &fs->h_u.usr_ip4_spec; | |
4287 | *unused |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) | | |
4288 | BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT); | |
4289 | ||
4290 | if (!usr_ip4_spec->ip4src) | |
4291 | *unused |= BIT(INNER_SRC_IP); | |
4292 | ||
4293 | if (!usr_ip4_spec->ip4dst) | |
4294 | *unused |= BIT(INNER_DST_IP); | |
4295 | ||
4296 | if (!usr_ip4_spec->tos) | |
4297 | *unused |= BIT(INNER_IP_TOS); | |
4298 | ||
4299 | if (!usr_ip4_spec->proto) | |
4300 | *unused |= BIT(INNER_IP_PROTO); | |
4301 | ||
4302 | if (usr_ip4_spec->l4_4_bytes) | |
4303 | return -EOPNOTSUPP; | |
4304 | ||
4305 | if (usr_ip4_spec->ip_ver != ETH_RX_NFC_IP4) | |
4306 | return -EOPNOTSUPP; | |
4307 | ||
4308 | break; | |
4309 | case SCTP_V6_FLOW: | |
4310 | case TCP_V6_FLOW: | |
4311 | case UDP_V6_FLOW: | |
4312 | tcp_ip6_spec = &fs->h_u.tcp_ip6_spec; | |
4313 | *unused |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) | | |
4314 | BIT(INNER_IP_TOS); | |
4315 | ||
4316 | if (!tcp_ip6_spec->ip6src[0] && !tcp_ip6_spec->ip6src[1] && | |
4317 | !tcp_ip6_spec->ip6src[2] && !tcp_ip6_spec->ip6src[3]) | |
4318 | *unused |= BIT(INNER_SRC_IP); | |
4319 | ||
4320 | if (!tcp_ip6_spec->ip6dst[0] && !tcp_ip6_spec->ip6dst[1] && | |
4321 | !tcp_ip6_spec->ip6dst[2] && !tcp_ip6_spec->ip6dst[3]) | |
4322 | *unused |= BIT(INNER_DST_IP); | |
4323 | ||
4324 | if (!tcp_ip6_spec->psrc) | |
4325 | *unused |= BIT(INNER_SRC_PORT); | |
4326 | ||
4327 | if (!tcp_ip6_spec->pdst) | |
4328 | *unused |= BIT(INNER_DST_PORT); | |
4329 | ||
4330 | if (tcp_ip6_spec->tclass) | |
4331 | return -EOPNOTSUPP; | |
4332 | ||
4333 | break; | |
4334 | case IPV6_USER_FLOW: | |
4335 | usr_ip6_spec = &fs->h_u.usr_ip6_spec; | |
4336 | *unused |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) | | |
4337 | BIT(INNER_IP_TOS) | BIT(INNER_SRC_PORT) | | |
4338 | BIT(INNER_DST_PORT); | |
4339 | ||
4340 | if (!usr_ip6_spec->ip6src[0] && !usr_ip6_spec->ip6src[1] && | |
4341 | !usr_ip6_spec->ip6src[2] && !usr_ip6_spec->ip6src[3]) | |
4342 | *unused |= BIT(INNER_SRC_IP); | |
4343 | ||
4344 | if (!usr_ip6_spec->ip6dst[0] && !usr_ip6_spec->ip6dst[1] && | |
4345 | !usr_ip6_spec->ip6dst[2] && !usr_ip6_spec->ip6dst[3]) | |
4346 | *unused |= BIT(INNER_DST_IP); | |
4347 | ||
4348 | if (!usr_ip6_spec->l4_proto) | |
4349 | *unused |= BIT(INNER_IP_PROTO); | |
4350 | ||
4351 | if (usr_ip6_spec->tclass) | |
4352 | return -EOPNOTSUPP; | |
4353 | ||
4354 | if (usr_ip6_spec->l4_4_bytes) | |
4355 | return -EOPNOTSUPP; | |
4356 | ||
4357 | break; | |
4358 | case ETHER_FLOW: | |
4359 | ether_spec = &fs->h_u.ether_spec; | |
4360 | *unused |= BIT(INNER_SRC_IP) | BIT(INNER_DST_IP) | | |
4361 | BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT) | | |
4362 | BIT(INNER_IP_TOS) | BIT(INNER_IP_PROTO); | |
4363 | ||
4364 | if (is_zero_ether_addr(ether_spec->h_source)) | |
4365 | *unused |= BIT(INNER_SRC_MAC); | |
4366 | ||
4367 | if (is_zero_ether_addr(ether_spec->h_dest)) | |
4368 | *unused |= BIT(INNER_DST_MAC); | |
4369 | ||
4370 | if (!ether_spec->h_proto) | |
4371 | *unused |= BIT(INNER_ETH_TYPE); | |
4372 | ||
4373 | break; | |
4374 | default: | |
4375 | return -EOPNOTSUPP; | |
4376 | } | |
4377 | ||
4378 | if ((fs->flow_type & FLOW_EXT)) { | |
4379 | if (fs->h_ext.vlan_etype) | |
4380 | return -EOPNOTSUPP; | |
4381 | if (!fs->h_ext.vlan_tci) | |
4382 | *unused |= BIT(INNER_VLAN_TAG_FST); | |
4383 | ||
4384 | if (fs->m_ext.vlan_tci) { | |
4385 | if (be16_to_cpu(fs->h_ext.vlan_tci) >= VLAN_N_VID) | |
4386 | return -EINVAL; | |
4387 | } | |
4388 | } else { | |
4389 | *unused |= BIT(INNER_VLAN_TAG_FST); | |
4390 | } | |
4391 | ||
4392 | if (fs->flow_type & FLOW_MAC_EXT) { | |
4393 | if (!(hdev->fd_cfg.proto_support & ETHER_FLOW)) | |
4394 | return -EOPNOTSUPP; | |
4395 | ||
4396 | if (is_zero_ether_addr(fs->h_ext.h_dest)) | |
4397 | *unused |= BIT(INNER_DST_MAC); | |
4398 | else | |
4399 | *unused &= ~(BIT(INNER_DST_MAC)); | |
4400 | } | |
4401 | ||
4402 | return 0; | |
4403 | } | |
4404 | ||
4405 | static bool hclge_fd_rule_exist(struct hclge_dev *hdev, u16 location) | |
4406 | { | |
4407 | struct hclge_fd_rule *rule = NULL; | |
4408 | struct hlist_node *node2; | |
4409 | ||
4410 | hlist_for_each_entry_safe(rule, node2, &hdev->fd_rule_list, rule_node) { | |
4411 | if (rule->location >= location) | |
4412 | break; | |
4413 | } | |
4414 | ||
4415 | return rule && rule->location == location; | |
4416 | } | |
4417 | ||
4418 | static int hclge_fd_update_rule_list(struct hclge_dev *hdev, | |
4419 | struct hclge_fd_rule *new_rule, | |
4420 | u16 location, | |
4421 | bool is_add) | |
4422 | { | |
4423 | struct hclge_fd_rule *rule = NULL, *parent = NULL; | |
4424 | struct hlist_node *node2; | |
4425 | ||
4426 | if (is_add && !new_rule) | |
4427 | return -EINVAL; | |
4428 | ||
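	/* The rule list is kept sorted by location: find the first rule at or after the requested location and remember its predecessor as the insertion point. */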
4429 | hlist_for_each_entry_safe(rule, node2, | |
4430 | &hdev->fd_rule_list, rule_node) { | |
4431 | if (rule->location >= location) | |
4432 | break; | |
4433 | parent = rule; | |
4434 | } | |
4435 | ||
4436 | if (rule && rule->location == location) { | |
4437 | hlist_del(&rule->rule_node); | |
4438 | kfree(rule); | |
4439 | hdev->hclge_fd_rule_num--; | |
4440 | ||
4441 | if (!is_add) | |
4442 | return 0; | |
4443 | ||
4444 | } else if (!is_add) { | |
4445 | dev_err(&hdev->pdev->dev, | |
4446 | "delete fail, rule %d does not exist\n", |
4447 | location); | |
4448 | return -EINVAL; | |
4449 | } | |
4450 | ||
4451 | INIT_HLIST_NODE(&new_rule->rule_node); | |
4452 | ||
4453 | if (parent) | |
4454 | hlist_add_behind(&new_rule->rule_node, &parent->rule_node); | |
4455 | else | |
4456 | hlist_add_head(&new_rule->rule_node, &hdev->fd_rule_list); | |
4457 | ||
4458 | hdev->hclge_fd_rule_num++; | |
4459 | ||
4460 | return 0; | |
4461 | } | |
4462 | ||
4463 | static int hclge_fd_get_tuple(struct hclge_dev *hdev, | |
4464 | struct ethtool_rx_flow_spec *fs, | |
4465 | struct hclge_fd_rule *rule) | |
4466 | { | |
4467 | u32 flow_type = fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT); | |
4468 | ||
4469 | switch (flow_type) { | |
4470 | case SCTP_V4_FLOW: | |
4471 | case TCP_V4_FLOW: | |
4472 | case UDP_V4_FLOW: | |
4473 | rule->tuples.src_ip[3] = | |
4474 | be32_to_cpu(fs->h_u.tcp_ip4_spec.ip4src); | |
4475 | rule->tuples_mask.src_ip[3] = | |
4476 | be32_to_cpu(fs->m_u.tcp_ip4_spec.ip4src); | |
4477 | ||
4478 | rule->tuples.dst_ip[3] = | |
4479 | be32_to_cpu(fs->h_u.tcp_ip4_spec.ip4dst); | |
4480 | rule->tuples_mask.dst_ip[3] = | |
4481 | be32_to_cpu(fs->m_u.tcp_ip4_spec.ip4dst); | |
4482 | ||
4483 | rule->tuples.src_port = be16_to_cpu(fs->h_u.tcp_ip4_spec.psrc); | |
4484 | rule->tuples_mask.src_port = | |
4485 | be16_to_cpu(fs->m_u.tcp_ip4_spec.psrc); | |
4486 | ||
4487 | rule->tuples.dst_port = be16_to_cpu(fs->h_u.tcp_ip4_spec.pdst); | |
4488 | rule->tuples_mask.dst_port = | |
4489 | be16_to_cpu(fs->m_u.tcp_ip4_spec.pdst); | |
4490 | ||
4491 | rule->tuples.ip_tos = fs->h_u.tcp_ip4_spec.tos; | |
4492 | rule->tuples_mask.ip_tos = fs->m_u.tcp_ip4_spec.tos; | |
4493 | ||
4494 | rule->tuples.ether_proto = ETH_P_IP; | |
4495 | rule->tuples_mask.ether_proto = 0xFFFF; | |
4496 | ||
4497 | break; | |
4498 | case IP_USER_FLOW: | |
4499 | rule->tuples.src_ip[3] = | |
4500 | be32_to_cpu(fs->h_u.usr_ip4_spec.ip4src); | |
4501 | rule->tuples_mask.src_ip[3] = | |
4502 | be32_to_cpu(fs->m_u.usr_ip4_spec.ip4src); | |
4503 | ||
4504 | rule->tuples.dst_ip[3] = | |
4505 | be32_to_cpu(fs->h_u.usr_ip4_spec.ip4dst); | |
4506 | rule->tuples_mask.dst_ip[3] = | |
4507 | be32_to_cpu(fs->m_u.usr_ip4_spec.ip4dst); | |
4508 | ||
4509 | rule->tuples.ip_tos = fs->h_u.usr_ip4_spec.tos; | |
4510 | rule->tuples_mask.ip_tos = fs->m_u.usr_ip4_spec.tos; | |
4511 | ||
4512 | rule->tuples.ip_proto = fs->h_u.usr_ip4_spec.proto; | |
4513 | rule->tuples_mask.ip_proto = fs->m_u.usr_ip4_spec.proto; | |
4514 | ||
4515 | rule->tuples.ether_proto = ETH_P_IP; | |
4516 | rule->tuples_mask.ether_proto = 0xFFFF; | |
4517 | ||
4518 | break; | |
4519 | case SCTP_V6_FLOW: | |
4520 | case TCP_V6_FLOW: | |
4521 | case UDP_V6_FLOW: | |
4522 | be32_to_cpu_array(rule->tuples.src_ip, | |
4523 | fs->h_u.tcp_ip6_spec.ip6src, 4); | |
4524 | be32_to_cpu_array(rule->tuples_mask.src_ip, | |
4525 | fs->m_u.tcp_ip6_spec.ip6src, 4); | |
4526 | ||
4527 | be32_to_cpu_array(rule->tuples.dst_ip, | |
4528 | fs->h_u.tcp_ip6_spec.ip6dst, 4); | |
4529 | be32_to_cpu_array(rule->tuples_mask.dst_ip, | |
4530 | fs->m_u.tcp_ip6_spec.ip6dst, 4); | |
4531 | ||
4532 | rule->tuples.src_port = be16_to_cpu(fs->h_u.tcp_ip6_spec.psrc); | |
4533 | rule->tuples_mask.src_port = | |
4534 | be16_to_cpu(fs->m_u.tcp_ip6_spec.psrc); | |
4535 | ||
4536 | rule->tuples.dst_port = be16_to_cpu(fs->h_u.tcp_ip6_spec.pdst); | |
4537 | rule->tuples_mask.dst_port = | |
4538 | be16_to_cpu(fs->m_u.tcp_ip6_spec.pdst); | |
4539 | ||
4540 | rule->tuples.ether_proto = ETH_P_IPV6; | |
4541 | rule->tuples_mask.ether_proto = 0xFFFF; | |
4542 | ||
4543 | break; | |
4544 | case IPV6_USER_FLOW: | |
4545 | be32_to_cpu_array(rule->tuples.src_ip, | |
4546 | fs->h_u.usr_ip6_spec.ip6src, 4); | |
4547 | be32_to_cpu_array(rule->tuples_mask.src_ip, | |
4548 | fs->m_u.usr_ip6_spec.ip6src, 4); | |
4549 | ||
4550 | be32_to_cpu_array(rule->tuples.dst_ip, | |
4551 | fs->h_u.usr_ip6_spec.ip6dst, 4); | |
4552 | be32_to_cpu_array(rule->tuples_mask.dst_ip, | |
4553 | fs->m_u.usr_ip6_spec.ip6dst, 4); | |
4554 | ||
4555 | rule->tuples.ip_proto = fs->h_u.usr_ip6_spec.l4_proto; | |
4556 | rule->tuples_mask.ip_proto = fs->m_u.usr_ip6_spec.l4_proto; | |
4557 | ||
4558 | rule->tuples.ether_proto = ETH_P_IPV6; | |
4559 | rule->tuples_mask.ether_proto = 0xFFFF; | |
4560 | ||
4561 | break; | |
4562 | case ETHER_FLOW: | |
4563 | ether_addr_copy(rule->tuples.src_mac, | |
4564 | fs->h_u.ether_spec.h_source); | |
4565 | ether_addr_copy(rule->tuples_mask.src_mac, | |
4566 | fs->m_u.ether_spec.h_source); | |
4567 | ||
4568 | ether_addr_copy(rule->tuples.dst_mac, | |
4569 | fs->h_u.ether_spec.h_dest); | |
4570 | ether_addr_copy(rule->tuples_mask.dst_mac, | |
4571 | fs->m_u.ether_spec.h_dest); | |
4572 | ||
4573 | rule->tuples.ether_proto = | |
4574 | be16_to_cpu(fs->h_u.ether_spec.h_proto); | |
4575 | rule->tuples_mask.ether_proto = | |
4576 | be16_to_cpu(fs->m_u.ether_spec.h_proto); | |
4577 | ||
4578 | break; | |
4579 | default: | |
4580 | return -EOPNOTSUPP; | |
4581 | } | |
4582 | ||
4583 | switch (flow_type) { | |
4584 | case SCTP_V4_FLOW: | |
4585 | case SCTP_V6_FLOW: | |
4586 | rule->tuples.ip_proto = IPPROTO_SCTP; | |
4587 | rule->tuples_mask.ip_proto = 0xFF; | |
4588 | break; | |
4589 | case TCP_V4_FLOW: | |
4590 | case TCP_V6_FLOW: | |
4591 | rule->tuples.ip_proto = IPPROTO_TCP; | |
4592 | rule->tuples_mask.ip_proto = 0xFF; | |
4593 | break; | |
4594 | case UDP_V4_FLOW: | |
4595 | case UDP_V6_FLOW: | |
4596 | rule->tuples.ip_proto = IPPROTO_UDP; | |
4597 | rule->tuples_mask.ip_proto = 0xFF; | |
4598 | break; | |
4599 | default: | |
4600 | break; | |
4601 | } | |
4602 | ||
4603 | if ((fs->flow_type & FLOW_EXT)) { | |
4604 | rule->tuples.vlan_tag1 = be16_to_cpu(fs->h_ext.vlan_tci); | |
4605 | rule->tuples_mask.vlan_tag1 = be16_to_cpu(fs->m_ext.vlan_tci); | |
4606 | } | |
4607 | ||
4608 | if (fs->flow_type & FLOW_MAC_EXT) { | |
4609 | ether_addr_copy(rule->tuples.dst_mac, fs->h_ext.h_dest); | |
4610 | ether_addr_copy(rule->tuples_mask.dst_mac, fs->m_ext.h_dest); | |
4611 | } | |
4612 | ||
4613 | return 0; | |
4614 | } | |
4615 | ||
4616 | static int hclge_add_fd_entry(struct hnae3_handle *handle, | |
4617 | struct ethtool_rxnfc *cmd) | |
4618 | { | |
4619 | struct hclge_vport *vport = hclge_get_vport(handle); | |
4620 | struct hclge_dev *hdev = vport->back; | |
4621 | u16 dst_vport_id = 0, q_index = 0; | |
4622 | struct ethtool_rx_flow_spec *fs; | |
4623 | struct hclge_fd_rule *rule; | |
4624 | u32 unused = 0; | |
4625 | u8 action; | |
4626 | int ret; | |
4627 | ||
4628 | if (!hnae3_dev_fd_supported(hdev)) | |
4629 | return -EOPNOTSUPP; | |
4630 | ||
4631 | if (!hdev->fd_cfg.fd_en) { | |
4632 | dev_warn(&hdev->pdev->dev, | |
4633 | "Please enable flow director first\n"); | |
4634 | return -EOPNOTSUPP; | |
4635 | } | |
4636 | ||
4637 | fs = (struct ethtool_rx_flow_spec *)&cmd->fs; | |
4638 | ||
4639 | ret = hclge_fd_check_spec(hdev, fs, &unused); | |
4640 | if (ret) { | |
4641 | dev_err(&hdev->pdev->dev, "Check fd spec failed\n"); | |
4642 | return ret; | |
4643 | } | |
4644 | ||
4645 | if (fs->ring_cookie == RX_CLS_FLOW_DISC) { | |
4646 | action = HCLGE_FD_ACTION_DROP_PACKET; | |
4647 | } else { | |
4648 | u32 ring = ethtool_get_flow_spec_ring(fs->ring_cookie); | |
4649 | u8 vf = ethtool_get_flow_spec_ring_vf(fs->ring_cookie); | |
4650 | u16 tqps; | |
4651 | ||
4652 | if (vf > hdev->num_req_vfs) { | |
4653 | dev_err(&hdev->pdev->dev, | |
4654 | "Error: vf id (%d) > max vf num (%d)\n", | |
4655 | vf, hdev->num_req_vfs); | |
4656 | return -EINVAL; | |
4657 | } | |
4658 | ||
4659 | dst_vport_id = vf ? hdev->vport[vf].vport_id : vport->vport_id; | |
4660 | tqps = vf ? hdev->vport[vf].alloc_tqps : vport->alloc_tqps; | |
4661 | ||
4662 | if (ring >= tqps) { | |
4663 | dev_err(&hdev->pdev->dev, | |
4664 | "Error: queue id (%d) > max queue id (%d)\n", | |
4665 | ring, tqps - 1); | |
4666 | return -EINVAL; | |
4667 | } | |
4668 | ||
4669 | action = HCLGE_FD_ACTION_ACCEPT_PACKET; | |
4670 | q_index = ring; | |
4671 | } | |
4672 | ||
4673 | rule = kzalloc(sizeof(*rule), GFP_KERNEL); | |
4674 | if (!rule) | |
4675 | return -ENOMEM; | |
4676 | ||
4677 | ret = hclge_fd_get_tuple(hdev, fs, rule); | |
4678 | if (ret) | |
4679 | goto free_rule; | |
4680 | ||
4681 | rule->flow_type = fs->flow_type; | |
4682 | ||
4683 | rule->location = fs->location; | |
4684 | rule->unused_tuple = unused; | |
4685 | rule->vf_id = dst_vport_id; | |
4686 | rule->queue_id = q_index; | |
4687 | rule->action = action; | |
4688 | ||
4689 | ret = hclge_config_action(hdev, HCLGE_FD_STAGE_1, rule); | |
4690 | if (ret) | |
4691 | goto free_rule; | |
4692 | ||
4693 | ret = hclge_config_key(hdev, HCLGE_FD_STAGE_1, rule); | |
4694 | if (ret) | |
4695 | goto free_rule; | |
4696 | ||
4697 | ret = hclge_fd_update_rule_list(hdev, rule, fs->location, true); | |
4698 | if (ret) | |
4699 | goto free_rule; | |
4700 | ||
4701 | return ret; | |
4702 | ||
4703 | free_rule: | |
4704 | kfree(rule); | |
4705 | return ret; | |
4706 | } | |
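/* Editor's sketch (not part of the driver): one way a rule can reach
 * hclge_add_fd_entry() from user space, via the ETHTOOL_SRXCLSRLINS ioctl
 * that "ethtool -N/-U" issues. The interface name, destination port and
 * location below are example values only.
 */
#include <string.h>
#include <stdio.h>
#include <unistd.h>
#include <arpa/inet.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>

static int add_tcp4_drop_rule(const char *ifname)
{
	struct ethtool_rxnfc nfc;
	struct ifreq ifr;
	int fd, ret;

	memset(&nfc, 0, sizeof(nfc));
	nfc.cmd = ETHTOOL_SRXCLSRLINS;
	nfc.fs.flow_type = TCP_V4_FLOW;
	nfc.fs.h_u.tcp_ip4_spec.pdst = htons(80);     /* match dst port 80 */
	nfc.fs.m_u.tcp_ip4_spec.pdst = htons(0xffff); /* exact match on the port */
	nfc.fs.ring_cookie = RX_CLS_FLOW_DISC;        /* drop matching packets */
	nfc.fs.location = 1;                          /* stage-1 rule index */

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);
	ifr.ifr_data = (char *)&nfc;

	fd = socket(AF_INET, SOCK_DGRAM, 0);
	if (fd < 0)
		return -1;

	ret = ioctl(fd, SIOCETHTOOL, &ifr);
	if (ret < 0)
		perror("ETHTOOL_SRXCLSRLINS");

	close(fd);
	return ret;
}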
4707 | ||
4708 | static int hclge_del_fd_entry(struct hnae3_handle *handle, | |
4709 | struct ethtool_rxnfc *cmd) | |
4710 | { | |
4711 | struct hclge_vport *vport = hclge_get_vport(handle); | |
4712 | struct hclge_dev *hdev = vport->back; | |
4713 | struct ethtool_rx_flow_spec *fs; | |
4714 | int ret; | |
4715 | ||
4716 | if (!hnae3_dev_fd_supported(hdev)) | |
4717 | return -EOPNOTSUPP; | |
4718 | ||
4719 | fs = (struct ethtool_rx_flow_spec *)&cmd->fs; | |
4720 | ||
4721 | if (fs->location >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]) | |
4722 | return -EINVAL; | |
4723 | ||
4724 | if (!hclge_fd_rule_exist(hdev, fs->location)) { | |
4725 | dev_err(&hdev->pdev->dev, | |
4726 | "Delete fail, rule %d is inexistent\n", | |
4727 | fs->location); | |
4728 | return -ENOENT; | |
4729 | } | |
4730 | ||
4731 | ret = hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true, | |
4732 | fs->location, NULL, false); | |
4733 | if (ret) | |
4734 | return ret; | |
4735 | ||
4736 | return hclge_fd_update_rule_list(hdev, NULL, fs->location, | |
4737 | false); | |
4738 | } | |
4739 | ||
7ce98982 JS |
4740 | static void hclge_del_all_fd_entries(struct hnae3_handle *handle, |
4741 | bool clear_list) | |
4742 | { | |
4743 | struct hclge_vport *vport = hclge_get_vport(handle); | |
4744 | struct hclge_dev *hdev = vport->back; | |
4745 | struct hclge_fd_rule *rule; | |
4746 | struct hlist_node *node; | |
4747 | ||
4748 | if (!hnae3_dev_fd_supported(hdev)) | |
4749 | return; | |
4750 | ||
4751 | if (clear_list) { | |
4752 | hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, | |
4753 | rule_node) { | |
4754 | hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true, | |
4755 | rule->location, NULL, false); | |
4756 | hlist_del(&rule->rule_node); | |
4757 | kfree(rule); | |
4758 | hdev->hclge_fd_rule_num--; | |
4759 | } | |
4760 | } else { | |
4761 | hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, | |
4762 | rule_node) | |
4763 | hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true, | |
4764 | rule->location, NULL, false); | |
4765 | } | |
4766 | } | |
4767 | ||
4768 | static int hclge_restore_fd_entries(struct hnae3_handle *handle) | |
4769 | { | |
4770 | struct hclge_vport *vport = hclge_get_vport(handle); | |
4771 | struct hclge_dev *hdev = vport->back; | |
4772 | struct hclge_fd_rule *rule; | |
4773 | struct hlist_node *node; | |
4774 | int ret; | |
4775 | ||
1afdb53a HT |
4776 | /* Return ok here, because the reset error handling will check this | |
4777 | * return value. If an error is returned here, the reset process will | |
4778 | * fail. | |
4779 | */ | |
7ce98982 | 4780 | if (!hnae3_dev_fd_supported(hdev)) |
1afdb53a | 4781 | return 0; |
7ce98982 JS |
4782 | |
4783 | hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) { | |
4784 | ret = hclge_config_action(hdev, HCLGE_FD_STAGE_1, rule); | |
4785 | if (!ret) | |
4786 | ret = hclge_config_key(hdev, HCLGE_FD_STAGE_1, rule); | |
4787 | ||
4788 | if (ret) { | |
4789 | dev_warn(&hdev->pdev->dev, | |
4790 | "Restore rule %d failed, remove it\n", | |
4791 | rule->location); | |
4792 | hlist_del(&rule->rule_node); | |
4793 | kfree(rule); | |
4794 | hdev->hclge_fd_rule_num--; | |
4795 | } | |
4796 | } | |
4797 | return 0; | |
4798 | } | |
4799 | ||
295043a7 JS |
4800 | static int hclge_get_fd_rule_cnt(struct hnae3_handle *handle, |
4801 | struct ethtool_rxnfc *cmd) | |
4802 | { | |
4803 | struct hclge_vport *vport = hclge_get_vport(handle); | |
4804 | struct hclge_dev *hdev = vport->back; | |
4805 | ||
4806 | if (!hnae3_dev_fd_supported(hdev)) | |
4807 | return -EOPNOTSUPP; | |
4808 | ||
4809 | cmd->rule_cnt = hdev->hclge_fd_rule_num; | |
4810 | cmd->data = hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]; | |
4811 | ||
4812 | return 0; | |
4813 | } | |
4814 | ||
4815 | static int hclge_get_fd_rule_info(struct hnae3_handle *handle, | |
4816 | struct ethtool_rxnfc *cmd) | |
4817 | { | |
4818 | struct hclge_vport *vport = hclge_get_vport(handle); | |
4819 | struct hclge_fd_rule *rule = NULL; | |
4820 | struct hclge_dev *hdev = vport->back; | |
4821 | struct ethtool_rx_flow_spec *fs; | |
4822 | struct hlist_node *node2; | |
4823 | ||
4824 | if (!hnae3_dev_fd_supported(hdev)) | |
4825 | return -EOPNOTSUPP; | |
4826 | ||
4827 | fs = (struct ethtool_rx_flow_spec *)&cmd->fs; | |
4828 | ||
4829 | hlist_for_each_entry_safe(rule, node2, &hdev->fd_rule_list, rule_node) { | |
4830 | if (rule->location >= fs->location) | |
4831 | break; | |
4832 | } | |
4833 | ||
4834 | if (!rule || fs->location != rule->location) | |
4835 | return -ENOENT; | |
4836 | ||
4837 | fs->flow_type = rule->flow_type; | |
4838 | switch (fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) { | |
4839 | case SCTP_V4_FLOW: | |
4840 | case TCP_V4_FLOW: | |
4841 | case UDP_V4_FLOW: | |
4842 | fs->h_u.tcp_ip4_spec.ip4src = | |
4843 | cpu_to_be32(rule->tuples.src_ip[3]); | |
4844 | fs->m_u.tcp_ip4_spec.ip4src = | |
4845 | rule->unused_tuple & BIT(INNER_SRC_IP) ? | |
4846 | 0 : cpu_to_be32(rule->tuples_mask.src_ip[3]); | |
4847 | ||
4848 | fs->h_u.tcp_ip4_spec.ip4dst = | |
4849 | cpu_to_be32(rule->tuples.dst_ip[3]); | |
4850 | fs->m_u.tcp_ip4_spec.ip4dst = | |
4851 | rule->unused_tuple & BIT(INNER_DST_IP) ? | |
4852 | 0 : cpu_to_be32(rule->tuples_mask.dst_ip[3]); | |
4853 | ||
4854 | fs->h_u.tcp_ip4_spec.psrc = cpu_to_be16(rule->tuples.src_port); | |
4855 | fs->m_u.tcp_ip4_spec.psrc = | |
4856 | rule->unused_tuple & BIT(INNER_SRC_PORT) ? | |
4857 | 0 : cpu_to_be16(rule->tuples_mask.src_port); | |
4858 | ||
4859 | fs->h_u.tcp_ip4_spec.pdst = cpu_to_be16(rule->tuples.dst_port); | |
4860 | fs->m_u.tcp_ip4_spec.pdst = | |
4861 | rule->unused_tuple & BIT(INNER_DST_PORT) ? | |
4862 | 0 : cpu_to_be16(rule->tuples_mask.dst_port); | |
4863 | ||
4864 | fs->h_u.tcp_ip4_spec.tos = rule->tuples.ip_tos; | |
4865 | fs->m_u.tcp_ip4_spec.tos = | |
4866 | rule->unused_tuple & BIT(INNER_IP_TOS) ? | |
4867 | 0 : rule->tuples_mask.ip_tos; | |
4868 | ||
4869 | break; | |
4870 | case IP_USER_FLOW: | |
4871 | fs->h_u.usr_ip4_spec.ip4src = | |
4872 | cpu_to_be32(rule->tuples.src_ip[3]); | |
4873 | fs->m_u.usr_ip4_spec.ip4src = | |
4874 | rule->unused_tuple & BIT(INNER_SRC_IP) ? | |
4875 | 0 : cpu_to_be32(rule->tuples_mask.src_ip[3]); | |
4876 | ||
4877 | fs->h_u.usr_ip4_spec.ip4dst = | |
4878 | cpu_to_be32(rule->tuples.dst_ip[3]); | |
4879 | fs->m_u.usr_ip4_spec.ip4dst = | |
4880 | rule->unused_tuple & BIT(INNER_DST_IP) ? | |
4881 | 0 : cpu_to_be32(rule->tuples_mask.dst_ip[3]); | |
4882 | ||
4883 | fs->h_u.usr_ip4_spec.tos = rule->tuples.ip_tos; | |
4884 | fs->m_u.usr_ip4_spec.tos = | |
4885 | rule->unused_tuple & BIT(INNER_IP_TOS) ? | |
4886 | 0 : rule->tuples_mask.ip_tos; | |
4887 | ||
4888 | fs->h_u.usr_ip4_spec.proto = rule->tuples.ip_proto; | |
4889 | fs->m_u.usr_ip4_spec.proto = | |
4890 | rule->unused_tuple & BIT(INNER_IP_PROTO) ? | |
4891 | 0 : rule->tuples_mask.ip_proto; | |
4892 | ||
4893 | fs->h_u.usr_ip4_spec.ip_ver = ETH_RX_NFC_IP4; | |
4894 | ||
4895 | break; | |
4896 | case SCTP_V6_FLOW: | |
4897 | case TCP_V6_FLOW: | |
4898 | case UDP_V6_FLOW: | |
4899 | cpu_to_be32_array(fs->h_u.tcp_ip6_spec.ip6src, | |
4900 | rule->tuples.src_ip, 4); | |
4901 | if (rule->unused_tuple & BIT(INNER_SRC_IP)) | |
4902 | memset(fs->m_u.tcp_ip6_spec.ip6src, 0, sizeof(int) * 4); | |
4903 | else | |
4904 | cpu_to_be32_array(fs->m_u.tcp_ip6_spec.ip6src, | |
4905 | rule->tuples_mask.src_ip, 4); | |
4906 | ||
4907 | cpu_to_be32_array(fs->h_u.tcp_ip6_spec.ip6dst, | |
4908 | rule->tuples.dst_ip, 4); | |
4909 | if (rule->unused_tuple & BIT(INNER_DST_IP)) | |
4910 | memset(fs->m_u.tcp_ip6_spec.ip6dst, 0, sizeof(int) * 4); | |
4911 | else | |
4912 | cpu_to_be32_array(fs->m_u.tcp_ip6_spec.ip6dst, | |
4913 | rule->tuples_mask.dst_ip, 4); | |
4914 | ||
4915 | fs->h_u.tcp_ip6_spec.psrc = cpu_to_be16(rule->tuples.src_port); | |
4916 | fs->m_u.tcp_ip6_spec.psrc = | |
4917 | rule->unused_tuple & BIT(INNER_SRC_PORT) ? | |
4918 | 0 : cpu_to_be16(rule->tuples_mask.src_port); | |
4919 | ||
4920 | fs->h_u.tcp_ip6_spec.pdst = cpu_to_be16(rule->tuples.dst_port); | |
4921 | fs->m_u.tcp_ip6_spec.pdst = | |
4922 | rule->unused_tuple & BIT(INNER_DST_PORT) ? | |
4923 | 0 : cpu_to_be16(rule->tuples_mask.dst_port); | |
4924 | ||
4925 | break; | |
4926 | case IPV6_USER_FLOW: | |
4927 | cpu_to_be32_array(fs->h_u.usr_ip6_spec.ip6src, | |
4928 | rule->tuples.src_ip, 4); | |
4929 | if (rule->unused_tuple & BIT(INNER_SRC_IP)) | |
4930 | memset(fs->m_u.usr_ip6_spec.ip6src, 0, sizeof(int) * 4); | |
4931 | else | |
4932 | cpu_to_be32_array(fs->m_u.usr_ip6_spec.ip6src, | |
4933 | rule->tuples_mask.src_ip, 4); | |
4934 | ||
4935 | cpu_to_be32_array(fs->h_u.usr_ip6_spec.ip6dst, | |
4936 | rule->tuples.dst_ip, 4); | |
4937 | if (rule->unused_tuple & BIT(INNER_DST_IP)) | |
4938 | memset(fs->m_u.usr_ip6_spec.ip6dst, 0, sizeof(int) * 4); | |
4939 | else | |
4940 | cpu_to_be32_array(fs->m_u.usr_ip6_spec.ip6dst, | |
4941 | rule->tuples_mask.dst_ip, 4); | |
4942 | ||
4943 | fs->h_u.usr_ip6_spec.l4_proto = rule->tuples.ip_proto; | |
4944 | fs->m_u.usr_ip6_spec.l4_proto = | |
4945 | rule->unused_tuple & BIT(INNER_IP_PROTO) ? | |
4946 | 0 : rule->tuples_mask.ip_proto; | |
4947 | ||
4948 | break; | |
4949 | case ETHER_FLOW: | |
4950 | ether_addr_copy(fs->h_u.ether_spec.h_source, | |
4951 | rule->tuples.src_mac); | |
4952 | if (rule->unused_tuple & BIT(INNER_SRC_MAC)) | |
4953 | eth_zero_addr(fs->m_u.ether_spec.h_source); | |
4954 | else | |
4955 | ether_addr_copy(fs->m_u.ether_spec.h_source, | |
4956 | rule->tuples_mask.src_mac); | |
4957 | ||
4958 | ether_addr_copy(fs->h_u.ether_spec.h_dest, | |
4959 | rule->tuples.dst_mac); | |
4960 | if (rule->unused_tuple & BIT(INNER_DST_MAC)) | |
4961 | eth_zero_addr(fs->m_u.ether_spec.h_dest); | |
4962 | else | |
4963 | ether_addr_copy(fs->m_u.ether_spec.h_dest, | |
4964 | rule->tuples_mask.dst_mac); | |
4965 | ||
4966 | fs->h_u.ether_spec.h_proto = | |
4967 | cpu_to_be16(rule->tuples.ether_proto); | |
4968 | fs->m_u.ether_spec.h_proto = | |
4969 | rule->unused_tuple & BIT(INNER_ETH_TYPE) ? | |
4970 | 0 : cpu_to_be16(rule->tuples_mask.ether_proto); | |
4971 | ||
4972 | break; | |
4973 | default: | |
4974 | return -EOPNOTSUPP; | |
4975 | } | |
4976 | ||
4977 | if (fs->flow_type & FLOW_EXT) { | |
4978 | fs->h_ext.vlan_tci = cpu_to_be16(rule->tuples.vlan_tag1); | |
4979 | fs->m_ext.vlan_tci = | |
4980 | rule->unused_tuple & BIT(INNER_VLAN_TAG_FST) ? | |
4981 | cpu_to_be16(VLAN_VID_MASK) : | |
4982 | cpu_to_be16(rule->tuples_mask.vlan_tag1); | |
4983 | } | |
4984 | ||
4985 | if (fs->flow_type & FLOW_MAC_EXT) { | |
4986 | ether_addr_copy(fs->h_ext.h_dest, rule->tuples.dst_mac); | |
4987 | if (rule->unused_tuple & BIT(INNER_DST_MAC)) | |
4988 | eth_zero_addr(fs->m_ext.h_dest); | |
4989 | else | |
4990 | ether_addr_copy(fs->m_ext.h_dest, | |
4991 | rule->tuples_mask.dst_mac); | |
4992 | } | |
4993 | ||
4994 | if (rule->action == HCLGE_FD_ACTION_DROP_PACKET) { | |
4995 | fs->ring_cookie = RX_CLS_FLOW_DISC; | |
4996 | } else { | |
4997 | u64 vf_id; | |
4998 | ||
4999 | fs->ring_cookie = rule->queue_id; | |
5000 | vf_id = rule->vf_id; | |
5001 | vf_id <<= ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF; | |
5002 | fs->ring_cookie |= vf_id; | |
5003 | } | |
5004 | ||
5005 | return 0; | |
5006 | } | |
5007 | ||
5008 | static int hclge_get_all_rules(struct hnae3_handle *handle, | |
5009 | struct ethtool_rxnfc *cmd, u32 *rule_locs) | |
5010 | { | |
5011 | struct hclge_vport *vport = hclge_get_vport(handle); | |
5012 | struct hclge_dev *hdev = vport->back; | |
5013 | struct hclge_fd_rule *rule; | |
5014 | struct hlist_node *node2; | |
5015 | int cnt = 0; | |
5016 | ||
5017 | if (!hnae3_dev_fd_supported(hdev)) | |
5018 | return -EOPNOTSUPP; | |
5019 | ||
5020 | cmd->data = hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]; | |
5021 | ||
5022 | hlist_for_each_entry_safe(rule, node2, | |
5023 | &hdev->fd_rule_list, rule_node) { | |
5024 | if (cnt == cmd->rule_cnt) | |
5025 | return -EMSGSIZE; | |
5026 | ||
5027 | rule_locs[cnt] = rule->location; | |
5028 | cnt++; | |
5029 | } | |
5030 | ||
5031 | cmd->rule_cnt = cnt; | |
5032 | ||
5033 | return 0; | |
5034 | } | |
5035 | ||
225c02eb HT |
5036 | static bool hclge_get_hw_reset_stat(struct hnae3_handle *handle) |
5037 | { | |
5038 | struct hclge_vport *vport = hclge_get_vport(handle); | |
5039 | struct hclge_dev *hdev = vport->back; | |
5040 | ||
5041 | return hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG) || | |
5042 | hclge_read_dev(&hdev->hw, HCLGE_FUN_RST_ING); | |
5043 | } | |
5044 | ||
5045 | static bool hclge_ae_dev_resetting(struct hnae3_handle *handle) | |
5046 | { | |
5047 | struct hclge_vport *vport = hclge_get_vport(handle); | |
5048 | struct hclge_dev *hdev = vport->back; | |
5049 | ||
5050 | return test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state); | |
5051 | } | |
5052 | ||
5053 | static unsigned long hclge_ae_dev_reset_cnt(struct hnae3_handle *handle) | |
5054 | { | |
5055 | struct hclge_vport *vport = hclge_get_vport(handle); | |
5056 | struct hclge_dev *hdev = vport->back; | |
5057 | ||
5058 | return hdev->reset_count; | |
5059 | } | |
5060 | ||
d1f04a80 JS |
5061 | static void hclge_enable_fd(struct hnae3_handle *handle, bool enable) |
5062 | { | |
5063 | struct hclge_vport *vport = hclge_get_vport(handle); | |
5064 | struct hclge_dev *hdev = vport->back; | |
5065 | ||
5066 | hdev->fd_cfg.fd_en = enable; | |
5067 | if (!enable) | |
5068 | hclge_del_all_fd_entries(handle, false); | |
5069 | else | |
5070 | hclge_restore_fd_entries(handle); | |
5071 | } | |
5072 | ||
46a3df9f S |
5073 | static void hclge_cfg_mac_mode(struct hclge_dev *hdev, bool enable) |
5074 | { | |
5075 | struct hclge_desc desc; | |
d44f9b63 YL |
5076 | struct hclge_config_mac_mode_cmd *req = |
5077 | (struct hclge_config_mac_mode_cmd *)desc.data; | |
a90bb9a5 | 5078 | u32 loop_en = 0; |
46a3df9f S |
5079 | int ret; |
5080 | ||
5081 | hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, false); | |
ccc23ef3 PL |
5082 | hnae3_set_bit(loop_en, HCLGE_MAC_TX_EN_B, enable); |
5083 | hnae3_set_bit(loop_en, HCLGE_MAC_RX_EN_B, enable); | |
5084 | hnae3_set_bit(loop_en, HCLGE_MAC_PAD_TX_B, enable); | |
5085 | hnae3_set_bit(loop_en, HCLGE_MAC_PAD_RX_B, enable); | |
5086 | hnae3_set_bit(loop_en, HCLGE_MAC_1588_TX_B, 0); | |
5087 | hnae3_set_bit(loop_en, HCLGE_MAC_1588_RX_B, 0); | |
5088 | hnae3_set_bit(loop_en, HCLGE_MAC_APP_LP_B, 0); | |
5089 | hnae3_set_bit(loop_en, HCLGE_MAC_LINE_LP_B, 0); | |
5090 | hnae3_set_bit(loop_en, HCLGE_MAC_FCS_TX_B, enable); | |
5091 | hnae3_set_bit(loop_en, HCLGE_MAC_RX_FCS_B, enable); | |
5092 | hnae3_set_bit(loop_en, HCLGE_MAC_RX_FCS_STRIP_B, enable); | |
5093 | hnae3_set_bit(loop_en, HCLGE_MAC_TX_OVERSIZE_TRUNCATE_B, enable); | |
5094 | hnae3_set_bit(loop_en, HCLGE_MAC_RX_OVERSIZE_TRUNCATE_B, enable); | |
5095 | hnae3_set_bit(loop_en, HCLGE_MAC_TX_UNDER_MIN_ERR_B, enable); | |
a90bb9a5 | 5096 | req->txrx_pad_fcs_loop_en = cpu_to_le32(loop_en); |
46a3df9f S |
5097 | |
5098 | ret = hclge_cmd_send(&hdev->hw, &desc, 1); | |
5099 | if (ret) | |
5100 | dev_err(&hdev->pdev->dev, | |
5101 | "mac enable fail, ret =%d.\n", ret); | |
5102 | } | |
5103 | ||
67b8c316 | 5104 | static int hclge_set_app_loopback(struct hclge_dev *hdev, bool en) |
c39c4d98 | 5105 | { |
c39c4d98 | 5106 | struct hclge_config_mac_mode_cmd *req; |
c39c4d98 YL |
5107 | struct hclge_desc desc; |
5108 | u32 loop_en; | |
5109 | int ret; | |
5110 | ||
e67d9ce9 YL |
5111 | req = (struct hclge_config_mac_mode_cmd *)&desc.data[0]; |
5112 | /* 1 Read out the MAC mode config at first */ | |
5113 | hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, true); | |
5114 | ret = hclge_cmd_send(&hdev->hw, &desc, 1); | |
5115 | if (ret) { | |
5116 | dev_err(&hdev->pdev->dev, | |
5117 | "mac loopback get fail, ret =%d.\n", ret); | |
5118 | return ret; | |
5119 | } | |
c39c4d98 | 5120 | |
e67d9ce9 YL |
5121 | /* 2 Then setup the loopback flag */ |
5122 | loop_en = le32_to_cpu(req->txrx_pad_fcs_loop_en); | |
ccc23ef3 | 5123 | hnae3_set_bit(loop_en, HCLGE_MAC_APP_LP_B, en ? 1 : 0); |
3ebc5e0b YL |
5124 | hnae3_set_bit(loop_en, HCLGE_MAC_TX_EN_B, en ? 1 : 0); |
5125 | hnae3_set_bit(loop_en, HCLGE_MAC_RX_EN_B, en ? 1 : 0); | |
e67d9ce9 YL |
5126 | |
5127 | req->txrx_pad_fcs_loop_en = cpu_to_le32(loop_en); | |
c39c4d98 | 5128 | |
e67d9ce9 YL |
5129 | /* 3 Config mac work mode with loopback flag |
5130 | * and its original configure parameters | |
5131 | */ | |
5132 | hclge_cmd_reuse_desc(&desc, false); | |
5133 | ret = hclge_cmd_send(&hdev->hw, &desc, 1); | |
5134 | if (ret) | |
5135 | dev_err(&hdev->pdev->dev, | |
5136 | "mac loopback set fail, ret =%d.\n", ret); | |
5137 | return ret; | |
5138 | } | |
c39c4d98 | 5139 | |
86957272 FL |
5140 | static int hclge_set_serdes_loopback(struct hclge_dev *hdev, bool en, |
5141 | enum hnae3_loop loop_mode) | |
e006bb00 PL |
5142 | { |
5143 | #define HCLGE_SERDES_RETRY_MS 10 | |
5144 | #define HCLGE_SERDES_RETRY_NUM 100 | |
5145 | struct hclge_serdes_lb_cmd *req; | |
5146 | struct hclge_desc desc; | |
5147 | int ret, i = 0; | |
86957272 | 5148 | u8 loop_mode_b; |
e006bb00 | 5149 | |
855f03fb | 5150 | req = (struct hclge_serdes_lb_cmd *)desc.data; |
e006bb00 PL |
5151 | hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SERDES_LOOPBACK, false); |
5152 | ||
86957272 FL |
5153 | switch (loop_mode) { |
5154 | case HNAE3_LOOP_SERIAL_SERDES: | |
5155 | loop_mode_b = HCLGE_CMD_SERDES_SERIAL_INNER_LOOP_B; | |
5156 | break; | |
5157 | case HNAE3_LOOP_PARALLEL_SERDES: | |
5158 | loop_mode_b = HCLGE_CMD_SERDES_PARALLEL_INNER_LOOP_B; | |
5159 | break; | |
5160 | default: | |
5161 | dev_err(&hdev->pdev->dev, | |
5162 | "unsupported serdes loopback mode %d\n", loop_mode); | |
5163 | return -ENOTSUPP; | |
5164 | } | |
5165 | ||
e006bb00 | 5166 | if (en) { |
86957272 FL |
5167 | req->enable = loop_mode_b; |
5168 | req->mask = loop_mode_b; | |
e006bb00 | 5169 | } else { |
86957272 | 5170 | req->mask = loop_mode_b; |
e006bb00 PL |
5171 | } |
5172 | ||
5173 | ret = hclge_cmd_send(&hdev->hw, &desc, 1); | |
5174 | if (ret) { | |
5175 | dev_err(&hdev->pdev->dev, | |
5176 | "serdes loopback set fail, ret = %d\n", ret); | |
5177 | return ret; | |
5178 | } | |
5179 | ||
5180 | do { | |
5181 | msleep(HCLGE_SERDES_RETRY_MS); | |
5182 | hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SERDES_LOOPBACK, | |
5183 | true); | |
5184 | ret = hclge_cmd_send(&hdev->hw, &desc, 1); | |
5185 | if (ret) { | |
5186 | dev_err(&hdev->pdev->dev, | |
5187 | "serdes loopback get, ret = %d\n", ret); | |
5188 | return ret; | |
5189 | } | |
5190 | } while (++i < HCLGE_SERDES_RETRY_NUM && | |
5191 | !(req->result & HCLGE_CMD_SERDES_DONE_B)); | |
5192 | ||
5193 | if (!(req->result & HCLGE_CMD_SERDES_DONE_B)) { | |
5194 | dev_err(&hdev->pdev->dev, "serdes loopback set timeout\n"); | |
5195 | return -EBUSY; | |
5196 | } else if (!(req->result & HCLGE_CMD_SERDES_SUCCESS_B)) { | |
5197 | dev_err(&hdev->pdev->dev, "serdes loopback set failed in fw\n"); | |
5198 | return -EIO; | |
5199 | } | |
5200 | ||
3ebc5e0b | 5201 | hclge_cfg_mac_mode(hdev, en); |
e006bb00 PL |
5202 | return 0; |
5203 | } | |
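/* Editor's note (not part of the driver): the completion poll above retries
 * HCLGE_SERDES_RETRY_NUM (100) times with HCLGE_SERDES_RETRY_MS (10) ms
 * sleeps between reads, so firmware gets roughly one second to report
 * HCLGE_CMD_SERDES_DONE_B before -EBUSY is returned.
 */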
5204 | ||
3ebc5e0b YL |
5205 | static int hclge_tqp_enable(struct hclge_dev *hdev, int tqp_id, |
5206 | int stream_id, bool enable) | |
5207 | { | |
5208 | struct hclge_desc desc; | |
5209 | struct hclge_cfg_com_tqp_queue_cmd *req = | |
5210 | (struct hclge_cfg_com_tqp_queue_cmd *)desc.data; | |
5211 | int ret; | |
5212 | ||
5213 | hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_COM_TQP_QUEUE, false); | |
5214 | req->tqp_id = cpu_to_le16(tqp_id & HCLGE_RING_ID_MASK); | |
5215 | req->stream_id = cpu_to_le16(stream_id); | |
5216 | req->enable |= enable << HCLGE_TQP_ENABLE_B; | |
5217 | ||
5218 | ret = hclge_cmd_send(&hdev->hw, &desc, 1); | |
5219 | if (ret) | |
5220 | dev_err(&hdev->pdev->dev, | |
5221 | "Tqp enable fail, status =%d.\n", ret); | |
5222 | return ret; | |
5223 | } | |
5224 | ||
e67d9ce9 YL |
5225 | static int hclge_set_loopback(struct hnae3_handle *handle, |
5226 | enum hnae3_loop loop_mode, bool en) | |
5227 | { | |
5228 | struct hclge_vport *vport = hclge_get_vport(handle); | |
5229 | struct hclge_dev *hdev = vport->back; | |
3ebc5e0b | 5230 | int i, ret; |
e67d9ce9 YL |
5231 | |
5232 | switch (loop_mode) { | |
67b8c316 FL |
5233 | case HNAE3_LOOP_APP: |
5234 | ret = hclge_set_app_loopback(hdev, en); | |
c39c4d98 | 5235 | break; |
86957272 FL |
5236 | case HNAE3_LOOP_SERIAL_SERDES: |
5237 | case HNAE3_LOOP_PARALLEL_SERDES: | |
5238 | ret = hclge_set_serdes_loopback(hdev, en, loop_mode); | |
e006bb00 | 5239 | break; |
c39c4d98 YL |
5240 | default: |
5241 | ret = -ENOTSUPP; | |
5242 | dev_err(&hdev->pdev->dev, | |
5243 | "loop_mode %d is not supported\n", loop_mode); | |
5244 | break; | |
5245 | } | |
5246 | ||
3ebc5e0b YL |
5247 | for (i = 0; i < vport->alloc_tqps; i++) { |
5248 | ret = hclge_tqp_enable(hdev, i, 0, en); | |
5249 | if (ret) | |
5250 | return ret; | |
5251 | } | |
46a3df9f | 5252 | |
3ebc5e0b | 5253 | return 0; |
46a3df9f S |
5254 | } |
5255 | ||
5256 | static void hclge_reset_tqp_stats(struct hnae3_handle *handle) | |
5257 | { | |
5258 | struct hclge_vport *vport = hclge_get_vport(handle); | |
5259 | struct hnae3_queue *queue; | |
5260 | struct hclge_tqp *tqp; | |
5261 | int i; | |
5262 | ||
5263 | for (i = 0; i < vport->alloc_tqps; i++) { | |
5264 | queue = handle->kinfo.tqp[i]; | |
5265 | tqp = container_of(queue, struct hclge_tqp, q); | |
5266 | memset(&tqp->tqp_stats, 0, sizeof(tqp->tqp_stats)); | |
5267 | } | |
5268 | } | |
5269 | ||
5270 | static int hclge_ae_start(struct hnae3_handle *handle) | |
5271 | { | |
5272 | struct hclge_vport *vport = hclge_get_vport(handle); | |
5273 | struct hclge_dev *hdev = vport->back; | |
46a3df9f | 5274 | |
46a3df9f S |
5275 | /* mac enable */ |
5276 | hclge_cfg_mac_mode(hdev, true); | |
5277 | clear_bit(HCLGE_STATE_DOWN, &hdev->state); | |
d039ef68 | 5278 | mod_timer(&hdev->service_timer, jiffies + HZ); |
3ae84019 | 5279 | hdev->hw.mac.link = 0; |
46a3df9f | 5280 | |
f9637cc2 PL |
5281 | /* reset tqp stats */ |
5282 | hclge_reset_tqp_stats(handle); | |
5283 | ||
dda6b7d5 | 5284 | hclge_mac_start_phy(hdev); |
46a3df9f | 5285 | |
46a3df9f S |
5286 | return 0; |
5287 | } | |
5288 | ||
5289 | static void hclge_ae_stop(struct hnae3_handle *handle) | |
5290 | { | |
5291 | struct hclge_vport *vport = hclge_get_vport(handle); | |
5292 | struct hclge_dev *hdev = vport->back; | |
46a3df9f | 5293 | |
4ee3e5a8 FL |
5294 | set_bit(HCLGE_STATE_DOWN, &hdev->state); |
5295 | ||
f9637cc2 PL |
5296 | del_timer_sync(&hdev->service_timer); |
5297 | cancel_work_sync(&hdev->service_task); | |
42b11ab7 | 5298 | clear_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state); |
f9637cc2 | 5299 | |
48ac80db HT |
5300 | /* If it is not PF reset, the firmware will disable the MAC, |
5301 | * so we only need to stop the PHY here. | |
5302 | */ | |
5303 | if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) && | |
5304 | hdev->reset_type != HNAE3_FUNC_RESET) { | |
4486f5c9 | 5305 | hclge_mac_stop_phy(hdev); |
f9637cc2 | 5306 | return; |
4486f5c9 | 5307 | } |
f9637cc2 | 5308 | |
46a3df9f S |
5309 | /* Mac disable */ |
5310 | hclge_cfg_mac_mode(hdev, false); | |
5311 | ||
5312 | hclge_mac_stop_phy(hdev); | |
5313 | ||
5314 | /* reset tqp stats */ | |
5315 | hclge_reset_tqp_stats(handle); | |
b91fb71c FL |
5316 | del_timer_sync(&hdev->service_timer); |
5317 | cancel_work_sync(&hdev->service_task); | |
5318 | hclge_update_link_status(hdev); | |
46a3df9f S |
5319 | } |
5320 | ||
337460de YL |
5321 | int hclge_vport_start(struct hclge_vport *vport) |
5322 | { | |
5323 | set_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state); | |
5324 | vport->last_active_jiffies = jiffies; | |
5325 | return 0; | |
5326 | } | |
5327 | ||
5328 | void hclge_vport_stop(struct hclge_vport *vport) | |
5329 | { | |
5330 | clear_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state); | |
5331 | } | |
5332 | ||
5333 | static int hclge_client_start(struct hnae3_handle *handle) | |
5334 | { | |
5335 | struct hclge_vport *vport = hclge_get_vport(handle); | |
5336 | ||
5337 | return hclge_vport_start(vport); | |
5338 | } | |
5339 | ||
5340 | static void hclge_client_stop(struct hnae3_handle *handle) | |
5341 | { | |
5342 | struct hclge_vport *vport = hclge_get_vport(handle); | |
5343 | ||
5344 | hclge_vport_stop(vport); | |
5345 | } | |
5346 | ||
46a3df9f S |
5347 | static int hclge_get_mac_vlan_cmd_status(struct hclge_vport *vport, |
5348 | u16 cmdq_resp, u8 resp_code, | |
5349 | enum hclge_mac_vlan_tbl_opcode op) | |
5350 | { | |
5351 | struct hclge_dev *hdev = vport->back; | |
5352 | int return_status = -EIO; | |
5353 | ||
5354 | if (cmdq_resp) { | |
5355 | dev_err(&hdev->pdev->dev, | |
5356 | "cmdq execute failed for get_mac_vlan_cmd_status,status=%d.\n", | |
5357 | cmdq_resp); | |
5358 | return -EIO; | |
5359 | } | |
5360 | ||
5361 | if (op == HCLGE_MAC_VLAN_ADD) { | |
5362 | if ((!resp_code) || (resp_code == 1)) { | |
5363 | return_status = 0; | |
5364 | } else if (resp_code == 2) { | |
2f894c5b | 5365 | return_status = -ENOSPC; |
46a3df9f S |
5366 | dev_err(&hdev->pdev->dev, |
5367 | "add mac addr failed for uc_overflow.\n"); | |
5368 | } else if (resp_code == 3) { | |
2f894c5b | 5369 | return_status = -ENOSPC; |
46a3df9f S |
5370 | dev_err(&hdev->pdev->dev, |
5371 | "add mac addr failed for mc_overflow.\n"); | |
5372 | } else { | |
5373 | dev_err(&hdev->pdev->dev, | |
5374 | "add mac addr failed for undefined, code=%d.\n", | |
5375 | resp_code); | |
5376 | } | |
5377 | } else if (op == HCLGE_MAC_VLAN_REMOVE) { | |
5378 | if (!resp_code) { | |
5379 | return_status = 0; | |
5380 | } else if (resp_code == 1) { | |
2f894c5b | 5381 | return_status = -ENOENT; |
46a3df9f S |
5382 | dev_dbg(&hdev->pdev->dev, |
5383 | "remove mac addr failed for miss.\n"); | |
5384 | } else { | |
5385 | dev_err(&hdev->pdev->dev, | |
5386 | "remove mac addr failed for undefined, code=%d.\n", | |
5387 | resp_code); | |
5388 | } | |
5389 | } else if (op == HCLGE_MAC_VLAN_LKUP) { | |
5390 | if (!resp_code) { | |
5391 | return_status = 0; | |
5392 | } else if (resp_code == 1) { | |
2f894c5b | 5393 | return_status = -ENOENT; |
46a3df9f S |
5394 | dev_dbg(&hdev->pdev->dev, |
5395 | "lookup mac addr failed for miss.\n"); | |
5396 | } else { | |
5397 | dev_err(&hdev->pdev->dev, | |
5398 | "lookup mac addr failed for undefined, code=%d.\n", | |
5399 | resp_code); | |
5400 | } | |
5401 | } else { | |
2f894c5b | 5402 | return_status = -EINVAL; |
46a3df9f S |
5403 | dev_err(&hdev->pdev->dev, |
5404 | "unknown opcode for get_mac_vlan_cmd_status,opcode=%d.\n", | |
5405 | op); | |
5406 | } | |
5407 | ||
5408 | return return_status; | |
5409 | } | |
5410 | ||
5411 | static int hclge_update_desc_vfid(struct hclge_desc *desc, int vfid, bool clr) | |
5412 | { | |
5413 | int word_num; | |
5414 | int bit_num; | |
5415 | ||
5416 | if (vfid > 255 || vfid < 0) | |
5417 | return -EIO; | |
5418 | ||
5419 | if (vfid >= 0 && vfid <= 191) { | |
5420 | word_num = vfid / 32; | |
5421 | bit_num = vfid % 32; | |
5422 | if (clr) | |
a90bb9a5 | 5423 | desc[1].data[word_num] &= cpu_to_le32(~(1 << bit_num)); |
46a3df9f | 5424 | else |
a90bb9a5 | 5425 | desc[1].data[word_num] |= cpu_to_le32(1 << bit_num); |
46a3df9f S |
5426 | } else { |
5427 | word_num = (vfid - 192) / 32; | |
5428 | bit_num = vfid % 32; | |
5429 | if (clr) | |
a90bb9a5 | 5430 | desc[2].data[word_num] &= cpu_to_le32(~(1 << bit_num)); |
46a3df9f | 5431 | else |
a90bb9a5 | 5432 | desc[2].data[word_num] |= cpu_to_le32(1 << bit_num); |
46a3df9f S |
5433 | } |
5434 | ||
5435 | return 0; | |
5436 | } | |
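/* Worked example (editor's note, not part of the driver): the function-id
 * bitmap spans descriptors 1 and 2. vfid 5 maps to desc[1].data[0] bit 5
 * (5 / 32 = 0, 5 % 32 = 5), while vfid 200 maps to desc[2].data[0] bit 8
 * ((200 - 192) / 32 = 0, 200 % 32 = 8). Using "vfid % 32" in the second
 * branch works only because the split point, 192, is a multiple of 32.
 */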
5437 | ||
5438 | static bool hclge_is_all_function_id_zero(struct hclge_desc *desc) | |
5439 | { | |
5440 | #define HCLGE_DESC_NUMBER 3 | |
5441 | #define HCLGE_FUNC_NUMBER_PER_DESC 6 | |
5442 | int i, j; | |
5443 | ||
5444 | for (i = 0; i < HCLGE_DESC_NUMBER; i++) | |
5445 | for (j = 0; j < HCLGE_FUNC_NUMBER_PER_DESC; j++) | |
5446 | if (desc[i].data[j]) | |
5447 | return false; | |
5448 | ||
5449 | return true; | |
5450 | } | |
5451 | ||
d44f9b63 | 5452 | static void hclge_prepare_mac_addr(struct hclge_mac_vlan_tbl_entry_cmd *new_req, |
46a3df9f S |
5453 | const u8 *addr) |
5454 | { | |
5455 | const unsigned char *mac_addr = addr; | |
5456 | u32 high_val = mac_addr[2] << 16 | (mac_addr[3] << 24) | | |
5457 | (mac_addr[0]) | (mac_addr[1] << 8); | |
5458 | u32 low_val = mac_addr[4] | (mac_addr[5] << 8); | |
5459 | ||
5460 | new_req->mac_addr_hi32 = cpu_to_le32(high_val); | |
5461 | new_req->mac_addr_lo16 = cpu_to_le16(low_val & 0xffff); | |
5462 | } | |
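/* Worked example (editor's note, not part of the driver): for the sample
 * address 00:11:22:33:44:55, hclge_prepare_mac_addr() builds
 * high_val = 0x33221100 and low_val = 0x5544, so the table entry holds the
 * six address bytes in reversed (little-endian) order across the 32-bit
 * and 16-bit fields.
 */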
5463 | ||
46a3df9f | 5464 | static int hclge_remove_mac_vlan_tbl(struct hclge_vport *vport, |
d44f9b63 | 5465 | struct hclge_mac_vlan_tbl_entry_cmd *req) |
46a3df9f S |
5466 | { |
5467 | struct hclge_dev *hdev = vport->back; | |
5468 | struct hclge_desc desc; | |
5469 | u8 resp_code; | |
a90bb9a5 | 5470 | u16 retval; |
46a3df9f S |
5471 | int ret; |
5472 | ||
5473 | hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_REMOVE, false); | |
5474 | ||
d44f9b63 | 5475 | memcpy(desc.data, req, sizeof(struct hclge_mac_vlan_tbl_entry_cmd)); |
46a3df9f S |
5476 | |
5477 | ret = hclge_cmd_send(&hdev->hw, &desc, 1); | |
5478 | if (ret) { | |
5479 | dev_err(&hdev->pdev->dev, | |
5480 | "del mac addr failed for cmd_send, ret =%d.\n", | |
5481 | ret); | |
5482 | return ret; | |
5483 | } | |
a90bb9a5 YL |
5484 | resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff; |
5485 | retval = le16_to_cpu(desc.retval); | |
46a3df9f | 5486 | |
a90bb9a5 | 5487 | return hclge_get_mac_vlan_cmd_status(vport, retval, resp_code, |
46a3df9f S |
5488 | HCLGE_MAC_VLAN_REMOVE); |
5489 | } | |
5490 | ||
5491 | static int hclge_lookup_mac_vlan_tbl(struct hclge_vport *vport, | |
d44f9b63 | 5492 | struct hclge_mac_vlan_tbl_entry_cmd *req, |
46a3df9f S |
5493 | struct hclge_desc *desc, |
5494 | bool is_mc) | |
5495 | { | |
5496 | struct hclge_dev *hdev = vport->back; | |
5497 | u8 resp_code; | |
a90bb9a5 | 5498 | u16 retval; |
46a3df9f S |
5499 | int ret; |
5500 | ||
5501 | hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_MAC_VLAN_ADD, true); | |
5502 | if (is_mc) { | |
5503 | desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT); | |
5504 | memcpy(desc[0].data, | |
5505 | req, | |
d44f9b63 | 5506 | sizeof(struct hclge_mac_vlan_tbl_entry_cmd)); |
46a3df9f S |
5507 | hclge_cmd_setup_basic_desc(&desc[1], |
5508 | HCLGE_OPC_MAC_VLAN_ADD, | |
5509 | true); | |
5510 | desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT); | |
5511 | hclge_cmd_setup_basic_desc(&desc[2], | |
5512 | HCLGE_OPC_MAC_VLAN_ADD, | |
5513 | true); | |
5514 | ret = hclge_cmd_send(&hdev->hw, desc, 3); | |
5515 | } else { | |
5516 | memcpy(desc[0].data, | |
5517 | req, | |
d44f9b63 | 5518 | sizeof(struct hclge_mac_vlan_tbl_entry_cmd)); |
46a3df9f S |
5519 | ret = hclge_cmd_send(&hdev->hw, desc, 1); |
5520 | } | |
5521 | if (ret) { | |
5522 | dev_err(&hdev->pdev->dev, | |
5523 | "lookup mac addr failed for cmd_send, ret =%d.\n", | |
5524 | ret); | |
5525 | return ret; | |
5526 | } | |
a90bb9a5 YL |
5527 | resp_code = (le32_to_cpu(desc[0].data[0]) >> 8) & 0xff; |
5528 | retval = le16_to_cpu(desc[0].retval); | |
46a3df9f | 5529 | |
a90bb9a5 | 5530 | return hclge_get_mac_vlan_cmd_status(vport, retval, resp_code, |
46a3df9f S |
5531 | HCLGE_MAC_VLAN_LKUP); |
5532 | } | |
5533 | ||
5534 | static int hclge_add_mac_vlan_tbl(struct hclge_vport *vport, | |
d44f9b63 | 5535 | struct hclge_mac_vlan_tbl_entry_cmd *req, |
46a3df9f S |
5536 | struct hclge_desc *mc_desc) |
5537 | { | |
5538 | struct hclge_dev *hdev = vport->back; | |
5539 | int cfg_status; | |
5540 | u8 resp_code; | |
a90bb9a5 | 5541 | u16 retval; |
46a3df9f S |
5542 | int ret; |
5543 | ||
5544 | if (!mc_desc) { | |
5545 | struct hclge_desc desc; | |
5546 | ||
5547 | hclge_cmd_setup_basic_desc(&desc, | |
5548 | HCLGE_OPC_MAC_VLAN_ADD, | |
5549 | false); | |
d44f9b63 YL |
5550 | memcpy(desc.data, req, |
5551 | sizeof(struct hclge_mac_vlan_tbl_entry_cmd)); | |
46a3df9f | 5552 | ret = hclge_cmd_send(&hdev->hw, &desc, 1); |
a90bb9a5 YL |
5553 | resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff; |
5554 | retval = le16_to_cpu(desc.retval); | |
5555 | ||
5556 | cfg_status = hclge_get_mac_vlan_cmd_status(vport, retval, | |
46a3df9f S |
5557 | resp_code, |
5558 | HCLGE_MAC_VLAN_ADD); | |
5559 | } else { | |
c3b6f755 | 5560 | hclge_cmd_reuse_desc(&mc_desc[0], false); |
46a3df9f | 5561 | mc_desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT); |
c3b6f755 | 5562 | hclge_cmd_reuse_desc(&mc_desc[1], false); |
46a3df9f | 5563 | mc_desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT); |
c3b6f755 | 5564 | hclge_cmd_reuse_desc(&mc_desc[2], false); |
46a3df9f S |
5565 | mc_desc[2].flag &= cpu_to_le16(~HCLGE_CMD_FLAG_NEXT); |
5566 | memcpy(mc_desc[0].data, req, | |
d44f9b63 | 5567 | sizeof(struct hclge_mac_vlan_tbl_entry_cmd)); |
46a3df9f | 5568 | ret = hclge_cmd_send(&hdev->hw, mc_desc, 3); |
a90bb9a5 YL |
5569 | resp_code = (le32_to_cpu(mc_desc[0].data[0]) >> 8) & 0xff; |
5570 | retval = le16_to_cpu(mc_desc[0].retval); | |
5571 | ||
5572 | cfg_status = hclge_get_mac_vlan_cmd_status(vport, retval, | |
46a3df9f S |
5573 | resp_code, |
5574 | HCLGE_MAC_VLAN_ADD); | |
5575 | } | |
5576 | ||
5577 | if (ret) { | |
5578 | dev_err(&hdev->pdev->dev, | |
5579 | "add mac addr failed for cmd_send, ret =%d.\n", | |
5580 | ret); | |
5581 | return ret; | |
5582 | } | |
5583 | ||
5584 | return cfg_status; | |
5585 | } | |
5586 | ||
2da5ec58 JS |
5587 | static int hclge_init_umv_space(struct hclge_dev *hdev) |
5588 | { | |
5589 | u16 allocated_size = 0; | |
5590 | int ret; | |
5591 | ||
5592 | ret = hclge_set_umv_space(hdev, hdev->wanted_umv_size, &allocated_size, | |
5593 | true); | |
5594 | if (ret) | |
5595 | return ret; | |
5596 | ||
5597 | if (allocated_size < hdev->wanted_umv_size) | |
5598 | dev_warn(&hdev->pdev->dev, | |
5599 | "Alloc umv space failed, want %d, get %d\n", | |
5600 | hdev->wanted_umv_size, allocated_size); | |
5601 | ||
5602 | mutex_init(&hdev->umv_mutex); | |
5603 | hdev->max_umv_size = allocated_size; | |
5604 | hdev->priv_umv_size = hdev->max_umv_size / (hdev->num_req_vfs + 2); | |
5605 | hdev->share_umv_size = hdev->priv_umv_size + | |
5606 | hdev->max_umv_size % (hdev->num_req_vfs + 2); | |
5607 | ||
5608 | return 0; | |
5609 | } | |
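/* Worked example (editor's note, not part of the driver; numbers are
 * illustrative only): with 258 unicast entries granted by firmware and
 * 6 requested VFs, the divisor is num_req_vfs + 2 = 8, so each function
 * gets a private quota of 258 / 8 = 32 entries, and the shared pool that
 * hclge_update_umv_space() draws on once a private quota is exhausted
 * starts at 32 + 258 % 8 = 34 entries.
 */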
5610 | ||
5611 | static int hclge_uninit_umv_space(struct hclge_dev *hdev) | |
5612 | { | |
5613 | int ret; | |
5614 | ||
5615 | if (hdev->max_umv_size > 0) { | |
5616 | ret = hclge_set_umv_space(hdev, hdev->max_umv_size, NULL, | |
5617 | false); | |
5618 | if (ret) | |
5619 | return ret; | |
5620 | hdev->max_umv_size = 0; | |
5621 | } | |
5622 | mutex_destroy(&hdev->umv_mutex); | |
5623 | ||
5624 | return 0; | |
5625 | } | |
5626 | ||
5627 | static int hclge_set_umv_space(struct hclge_dev *hdev, u16 space_size, | |
5628 | u16 *allocated_size, bool is_alloc) | |
5629 | { | |
5630 | struct hclge_umv_spc_alc_cmd *req; | |
5631 | struct hclge_desc desc; | |
5632 | int ret; | |
5633 | ||
5634 | req = (struct hclge_umv_spc_alc_cmd *)desc.data; | |
5635 | hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_ALLOCATE, false); | |
5636 | hnae3_set_bit(req->allocate, HCLGE_UMV_SPC_ALC_B, !is_alloc); | |
5637 | req->space_size = cpu_to_le32(space_size); | |
5638 | ||
5639 | ret = hclge_cmd_send(&hdev->hw, &desc, 1); | |
5640 | if (ret) { | |
5641 | dev_err(&hdev->pdev->dev, | |
5642 | "%s umv space failed for cmd_send, ret =%d\n", | |
5643 | is_alloc ? "allocate" : "free", ret); | |
5644 | return ret; | |
5645 | } | |
5646 | ||
5647 | if (is_alloc && allocated_size) | |
5648 | *allocated_size = le32_to_cpu(desc.data[1]); | |
5649 | ||
5650 | return 0; | |
5651 | } | |
5652 | ||
5653 | static void hclge_reset_umv_space(struct hclge_dev *hdev) | |
5654 | { | |
5655 | struct hclge_vport *vport; | |
5656 | int i; | |
5657 | ||
5658 | for (i = 0; i < hdev->num_alloc_vport; i++) { | |
5659 | vport = &hdev->vport[i]; | |
5660 | vport->used_umv_num = 0; | |
5661 | } | |
5662 | ||
5663 | mutex_lock(&hdev->umv_mutex); | |
5664 | hdev->share_umv_size = hdev->priv_umv_size + | |
5665 | hdev->max_umv_size % (hdev->num_req_vfs + 2); | |
5666 | mutex_unlock(&hdev->umv_mutex); | |
5667 | } | |
5668 | ||
5669 | static bool hclge_is_umv_space_full(struct hclge_vport *vport) | |
5670 | { | |
5671 | struct hclge_dev *hdev = vport->back; | |
5672 | bool is_full; | |
5673 | ||
5674 | mutex_lock(&hdev->umv_mutex); | |
5675 | is_full = (vport->used_umv_num >= hdev->priv_umv_size && | |
5676 | hdev->share_umv_size == 0); | |
5677 | mutex_unlock(&hdev->umv_mutex); | |
5678 | ||
5679 | return is_full; | |
5680 | } | |
5681 | ||
5682 | static void hclge_update_umv_space(struct hclge_vport *vport, bool is_free) | |
5683 | { | |
5684 | struct hclge_dev *hdev = vport->back; | |
5685 | ||
5686 | mutex_lock(&hdev->umv_mutex); | |
5687 | if (is_free) { | |
5688 | if (vport->used_umv_num > hdev->priv_umv_size) | |
5689 | hdev->share_umv_size++; | |
5690 | vport->used_umv_num--; | |
5691 | } else { | |
5692 | if (vport->used_umv_num >= hdev->priv_umv_size) | |
5693 | hdev->share_umv_size--; | |
5694 | vport->used_umv_num++; | |
5695 | } | |
5696 | mutex_unlock(&hdev->umv_mutex); | |
5697 | } | |
5698 | ||
46a3df9f S |
5699 | static int hclge_add_uc_addr(struct hnae3_handle *handle, |
5700 | const unsigned char *addr) | |
5701 | { | |
5702 | struct hclge_vport *vport = hclge_get_vport(handle); | |
5703 | ||
5704 | return hclge_add_uc_addr_common(vport, addr); | |
5705 | } | |
5706 | ||
5707 | int hclge_add_uc_addr_common(struct hclge_vport *vport, | |
5708 | const unsigned char *addr) | |
5709 | { | |
5710 | struct hclge_dev *hdev = vport->back; | |
d44f9b63 | 5711 | struct hclge_mac_vlan_tbl_entry_cmd req; |
bf88f41f | 5712 | struct hclge_desc desc; |
a90bb9a5 | 5713 | u16 egress_port = 0; |
04f0c72a | 5714 | int ret; |
46a3df9f S |
5715 | |
5716 | /* mac addr check */ | |
5717 | if (is_zero_ether_addr(addr) || | |
5718 | is_broadcast_ether_addr(addr) || | |
5719 | is_multicast_ether_addr(addr)) { | |
5720 | dev_err(&hdev->pdev->dev, | |
5721 | "Set_uc mac err! invalid mac:%pM. is_zero:%d,is_br=%d,is_mul=%d\n", | |
5722 | addr, | |
5723 | is_zero_ether_addr(addr), | |
5724 | is_broadcast_ether_addr(addr), | |
5725 | is_multicast_ether_addr(addr)); | |
5726 | return -EINVAL; | |
5727 | } | |
5728 | ||
5729 | memset(&req, 0, sizeof(req)); | |
ccc23ef3 | 5730 | hnae3_set_bit(req.flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1); |
a90bb9a5 | 5731 | |
ccc23ef3 PL |
5732 | hnae3_set_field(egress_port, HCLGE_MAC_EPORT_VFID_M, |
5733 | HCLGE_MAC_EPORT_VFID_S, vport->vport_id); | |
a90bb9a5 YL |
5734 | |
5735 | req.egress_port = cpu_to_le16(egress_port); | |
46a3df9f S |
5736 | |
5737 | hclge_prepare_mac_addr(&req, addr); | |
5738 | ||
bf88f41f JS |
5739 | /* Look up the mac address in the mac_vlan table, and add | |
5740 | * it if the entry does not exist. Duplicate unicast entries | |
5741 | * are not allowed in the mac vlan table. | |
5742 | */ | |
5743 | ret = hclge_lookup_mac_vlan_tbl(vport, &req, &desc, false); | |
2da5ec58 JS |
5744 | if (ret == -ENOENT) { |
5745 | if (!hclge_is_umv_space_full(vport)) { | |
5746 | ret = hclge_add_mac_vlan_tbl(vport, &req, NULL); | |
5747 | if (!ret) | |
5748 | hclge_update_umv_space(vport, false); | |
5749 | return ret; | |
5750 | } | |
5751 | ||
5752 | dev_err(&hdev->pdev->dev, "UC MAC table full(%u)\n", | |
5753 | hdev->priv_umv_size); | |
5754 | ||
5755 | return -ENOSPC; | |
5756 | } | |
bf88f41f JS |
5757 | |
5758 | /* check if we just hit the duplicate */ | |
5759 | if (!ret) | |
5760 | ret = -EINVAL; | |
5761 | ||
5762 | dev_err(&hdev->pdev->dev, | |
5763 | "PF failed to add unicast entry(%pM) in the MAC table\n", | |
5764 | addr); | |
46a3df9f | 5765 | |
04f0c72a | 5766 | return ret; |
46a3df9f S |
5767 | } |
5768 | ||
5769 | static int hclge_rm_uc_addr(struct hnae3_handle *handle, | |
5770 | const unsigned char *addr) | |
5771 | { | |
5772 | struct hclge_vport *vport = hclge_get_vport(handle); | |
5773 | ||
5774 | return hclge_rm_uc_addr_common(vport, addr); | |
5775 | } | |
5776 | ||
5777 | int hclge_rm_uc_addr_common(struct hclge_vport *vport, | |
5778 | const unsigned char *addr) | |
5779 | { | |
5780 | struct hclge_dev *hdev = vport->back; | |
d44f9b63 | 5781 | struct hclge_mac_vlan_tbl_entry_cmd req; |
04f0c72a | 5782 | int ret; |
46a3df9f S |
5783 | |
5784 | /* mac addr check */ | |
5785 | if (is_zero_ether_addr(addr) || | |
5786 | is_broadcast_ether_addr(addr) || | |
5787 | is_multicast_ether_addr(addr)) { | |
5788 | dev_dbg(&hdev->pdev->dev, | |
5789 | "Remove mac err! invalid mac:%pM.\n", | |
5790 | addr); | |
5791 | return -EINVAL; | |
5792 | } | |
5793 | ||
5794 | memset(&req, 0, sizeof(req)); | |
ccc23ef3 PL |
5795 | hnae3_set_bit(req.flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1); |
5796 | hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0); | |
46a3df9f | 5797 | hclge_prepare_mac_addr(&req, addr); |
04f0c72a | 5798 | ret = hclge_remove_mac_vlan_tbl(vport, &req); |
2da5ec58 JS |
5799 | if (!ret) |
5800 | hclge_update_umv_space(vport, true); | |
46a3df9f | 5801 | |
04f0c72a | 5802 | return ret; |
46a3df9f S |
5803 | } |
5804 | ||
5805 | static int hclge_add_mc_addr(struct hnae3_handle *handle, | |
5806 | const unsigned char *addr) | |
5807 | { | |
5808 | struct hclge_vport *vport = hclge_get_vport(handle); | |
5809 | ||
2bf8098b | 5810 | return hclge_add_mc_addr_common(vport, addr); |
46a3df9f S |
5811 | } |
5812 | ||
5813 | int hclge_add_mc_addr_common(struct hclge_vport *vport, | |
5814 | const unsigned char *addr) | |
5815 | { | |
5816 | struct hclge_dev *hdev = vport->back; | |
d44f9b63 | 5817 | struct hclge_mac_vlan_tbl_entry_cmd req; |
46a3df9f | 5818 | struct hclge_desc desc[3]; |
46a3df9f S |
5819 | int status; |
5820 | ||
5821 | /* mac addr check */ | |
5822 | if (!is_multicast_ether_addr(addr)) { | |
5823 | dev_err(&hdev->pdev->dev, | |
5824 | "Add mc mac err! invalid mac:%pM.\n", | |
5825 | addr); | |
5826 | return -EINVAL; | |
5827 | } | |
5828 | memset(&req, 0, sizeof(req)); | |
ccc23ef3 PL |
5829 | hnae3_set_bit(req.flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1); |
5830 | hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0); | |
5831 | hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT1_EN_B, 1); | |
738a3401 | 5832 | hnae3_set_bit(req.mc_mac_en, HCLGE_MAC_VLAN_BIT0_EN_B, 1); |
46a3df9f S |
5833 | hclge_prepare_mac_addr(&req, addr); |
5834 | status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true); | |
5835 | if (!status) { | |
5836 | /* This mac addr exists, update VFID for it */ | |
5837 | hclge_update_desc_vfid(desc, vport->vport_id, false); | |
5838 | status = hclge_add_mac_vlan_tbl(vport, &req, desc); | |
5839 | } else { | |
5840 | /* This mac addr does not exist, add a new entry for it */ | |
5841 | memset(desc[0].data, 0, sizeof(desc[0].data)); | |
5842 | memset(desc[1].data, 0, sizeof(desc[0].data)); | |
5843 | memset(desc[2].data, 0, sizeof(desc[0].data)); | |
5844 | hclge_update_desc_vfid(desc, vport->vport_id, false); | |
5845 | status = hclge_add_mac_vlan_tbl(vport, &req, desc); | |
5846 | } | |
5847 | ||
55b049be JS |
5848 | if (status == -ENOSPC) |
5849 | dev_err(&hdev->pdev->dev, "mc mac vlan table is full\n"); | |
46a3df9f S |
5850 | |
5851 | return status; | |
5852 | } | |
5853 | ||
5854 | static int hclge_rm_mc_addr(struct hnae3_handle *handle, | |
5855 | const unsigned char *addr) | |
5856 | { | |
5857 | struct hclge_vport *vport = hclge_get_vport(handle); | |
5858 | ||
5859 | return hclge_rm_mc_addr_common(vport, addr); | |
5860 | } | |
5861 | ||
5862 | int hclge_rm_mc_addr_common(struct hclge_vport *vport, | |
5863 | const unsigned char *addr) | |
5864 | { | |
5865 | struct hclge_dev *hdev = vport->back; | |
d44f9b63 | 5866 | struct hclge_mac_vlan_tbl_entry_cmd req; |
46a3df9f S |
5867 | enum hclge_cmd_status status; |
5868 | struct hclge_desc desc[3]; | |
46a3df9f S |
5869 | |
5870 | /* mac addr check */ | |
5871 | if (!is_multicast_ether_addr(addr)) { | |
5872 | dev_dbg(&hdev->pdev->dev, | |
5873 | "Remove mc mac err! invalid mac:%pM.\n", | |
5874 | addr); | |
5875 | return -EINVAL; | |
5876 | } | |
5877 | ||
5878 | memset(&req, 0, sizeof(req)); | |
ccc23ef3 PL |
5879 | hnae3_set_bit(req.flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1); |
5880 | hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0); | |
5881 | hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT1_EN_B, 1); | |
738a3401 | 5882 | hnae3_set_bit(req.mc_mac_en, HCLGE_MAC_VLAN_BIT0_EN_B, 1); |
46a3df9f S |
5883 | hclge_prepare_mac_addr(&req, addr); |
5884 | status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true); | |
5885 | if (!status) { | |
5886 | /* This mac addr exists, remove this handle's VFID for it */ | |
5887 | hclge_update_desc_vfid(desc, vport->vport_id, true); | |
5888 | ||
5889 | if (hclge_is_all_function_id_zero(desc)) | |
5890 | /* All the vfid is zero, so need to delete this entry */ | |
5891 | status = hclge_remove_mac_vlan_tbl(vport, &req); | |
5892 | else | |
5893 | /* Not all the vfid is zero, update the vfid */ | |
5894 | status = hclge_add_mac_vlan_tbl(vport, &req, desc); | |
5895 | ||
5896 | } else { | |
a832d8b5 XW |
5897 | /* This mac address may be in the mta table, but it cannot be | |
5898 | * deleted here because an mta entry represents an address | |
5899 | * range rather than a specific address. The delete action for | |
5900 | * all entries takes effect in update_mta_status, called by | |
5901 | * hns3_nic_set_rx_mode. | |
5902 | */ | |
5903 | status = 0; | |
46a3df9f S |
5904 | } |
5905 | ||
46a3df9f S |
5906 | return status; |
5907 | } | |
5908 | ||
635bfb58 FL |
5909 | static int hclge_get_mac_ethertype_cmd_status(struct hclge_dev *hdev, |
5910 | u16 cmdq_resp, u8 resp_code) | |
5911 | { | |
5912 | #define HCLGE_ETHERTYPE_SUCCESS_ADD 0 | |
5913 | #define HCLGE_ETHERTYPE_ALREADY_ADD 1 | |
5914 | #define HCLGE_ETHERTYPE_MGR_TBL_OVERFLOW 2 | |
5915 | #define HCLGE_ETHERTYPE_KEY_CONFLICT 3 | |
5916 | ||
5917 | int return_status; | |
5918 | ||
5919 | if (cmdq_resp) { | |
5920 | dev_err(&hdev->pdev->dev, | |
5921 | "cmdq execute failed for get_mac_ethertype_cmd_status, status=%d.\n", | |
5922 | cmdq_resp); | |
5923 | return -EIO; | |
5924 | } | |
5925 | ||
5926 | switch (resp_code) { | |
5927 | case HCLGE_ETHERTYPE_SUCCESS_ADD: | |
5928 | case HCLGE_ETHERTYPE_ALREADY_ADD: | |
5929 | return_status = 0; | |
5930 | break; | |
5931 | case HCLGE_ETHERTYPE_MGR_TBL_OVERFLOW: | |
5932 | dev_err(&hdev->pdev->dev, | |
5933 | "add mac ethertype failed for manager table overflow.\n"); | |
5934 | return_status = -EIO; | |
5935 | break; | |
5936 | case HCLGE_ETHERTYPE_KEY_CONFLICT: | |
5937 | dev_err(&hdev->pdev->dev, | |
5938 | "add mac ethertype failed for key conflict.\n"); | |
5939 | return_status = -EIO; | |
5940 | break; | |
5941 | default: | |
5942 | dev_err(&hdev->pdev->dev, | |
5943 | "add mac ethertype failed for undefined, code=%d.\n", | |
5944 | resp_code); | |
5945 | return_status = -EIO; | |
5946 | } | |
5947 | ||
5948 | return return_status; | |
5949 | } | |
5950 | ||
5951 | static int hclge_add_mgr_tbl(struct hclge_dev *hdev, | |
5952 | const struct hclge_mac_mgr_tbl_entry_cmd *req) | |
5953 | { | |
5954 | struct hclge_desc desc; | |
5955 | u8 resp_code; | |
5956 | u16 retval; | |
5957 | int ret; | |
5958 | ||
5959 | hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_ETHTYPE_ADD, false); | |
5960 | memcpy(desc.data, req, sizeof(struct hclge_mac_mgr_tbl_entry_cmd)); | |
5961 | ||
5962 | ret = hclge_cmd_send(&hdev->hw, &desc, 1); | |
5963 | if (ret) { | |
5964 | dev_err(&hdev->pdev->dev, | |
5965 | "add mac ethertype failed for cmd_send, ret =%d.\n", | |
5966 | ret); | |
5967 | return ret; | |
5968 | } | |
5969 | ||
5970 | resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff; | |
5971 | retval = le16_to_cpu(desc.retval); | |
5972 | ||
5973 | return hclge_get_mac_ethertype_cmd_status(hdev, retval, resp_code); | |
5974 | } | |
5975 | ||
5976 | static int init_mgr_tbl(struct hclge_dev *hdev) | |
5977 | { | |
5978 | int ret; | |
5979 | int i; | |
5980 | ||
5981 | for (i = 0; i < ARRAY_SIZE(hclge_mgr_table); i++) { | |
5982 | ret = hclge_add_mgr_tbl(hdev, &hclge_mgr_table[i]); | |
5983 | if (ret) { | |
5984 | dev_err(&hdev->pdev->dev, | |
5985 | "add mac ethertype failed, ret =%d.\n", | |
5986 | ret); | |
5987 | return ret; | |
5988 | } | |
5989 | } | |
5990 | ||
5991 | return 0; | |
5992 | } | |
5993 | ||
46a3df9f S |
5994 | static void hclge_get_mac_addr(struct hnae3_handle *handle, u8 *p) |
5995 | { | |
5996 | struct hclge_vport *vport = hclge_get_vport(handle); | |
5997 | struct hclge_dev *hdev = vport->back; | |
5998 | ||
5999 | ether_addr_copy(p, hdev->hw.mac.mac_addr); | |
6000 | } | |
6001 | ||
3cbf5e2d FL |
6002 | static int hclge_set_mac_addr(struct hnae3_handle *handle, void *p, |
6003 | bool is_first) | |
46a3df9f S |
6004 | { |
6005 | const unsigned char *new_addr = (const unsigned char *)p; | |
6006 | struct hclge_vport *vport = hclge_get_vport(handle); | |
6007 | struct hclge_dev *hdev = vport->back; | |
20a5c4c0 | 6008 | int ret; |
46a3df9f S |
6009 | |
6010 | /* mac addr check */ | |
6011 | if (is_zero_ether_addr(new_addr) || | |
6012 | is_broadcast_ether_addr(new_addr) || | |
6013 | is_multicast_ether_addr(new_addr)) { | |
6014 | dev_err(&hdev->pdev->dev, | |
6015 | "Change uc mac err! invalid mac:%p.\n", | |
6016 | new_addr); | |
6017 | return -EINVAL; | |
6018 | } | |
6019 | ||
3cbf5e2d | 6020 | if (!is_first && hclge_rm_uc_addr(handle, hdev->hw.mac.mac_addr)) |
20a5c4c0 | 6021 | dev_warn(&hdev->pdev->dev, |
3cbf5e2d | 6022 | "remove old uc mac address fail.\n"); |
46a3df9f | 6023 | |
20a5c4c0 FL |
6024 | ret = hclge_add_uc_addr(handle, new_addr); |
6025 | if (ret) { | |
6026 | dev_err(&hdev->pdev->dev, | |
6027 | "add uc mac address fail, ret =%d.\n", | |
6028 | ret); | |
6029 | ||
3cbf5e2d FL |
6030 | if (!is_first && |
6031 | hclge_add_uc_addr(handle, hdev->hw.mac.mac_addr)) | |
20a5c4c0 | 6032 | dev_err(&hdev->pdev->dev, |
3cbf5e2d | 6033 | "restore uc mac address fail.\n"); |
20a5c4c0 FL |
6034 | |
6035 | return -EIO; | |
46a3df9f S |
6036 | } |
6037 | ||
532fdd5e | 6038 | ret = hclge_pause_addr_cfg(hdev, new_addr); |
20a5c4c0 FL |
6039 | if (ret) { |
6040 | dev_err(&hdev->pdev->dev, | |
6041 | "configure mac pause address fail, ret =%d.\n", | |
6042 | ret); | |
6043 | return -EIO; | |
6044 | } | |
6045 | ||
6046 | ether_addr_copy(hdev->hw.mac.mac_addr, new_addr); | |
6047 | ||
6048 | return 0; | |
46a3df9f S |
6049 | } |
6050 | ||
a185d723 XW |
6051 | static int hclge_do_ioctl(struct hnae3_handle *handle, struct ifreq *ifr, |
6052 | int cmd) | |
6053 | { | |
6054 | struct hclge_vport *vport = hclge_get_vport(handle); | |
6055 | struct hclge_dev *hdev = vport->back; | |
6056 | ||
6057 | if (!hdev->hw.mac.phydev) | |
6058 | return -EOPNOTSUPP; | |
6059 | ||
6060 | return phy_mii_ioctl(hdev->hw.mac.phydev, ifr, cmd); | |
6061 | } | |
6062 | ||
46a3df9f | 6063 | static int hclge_set_vlan_filter_ctrl(struct hclge_dev *hdev, u8 vlan_type, |
0e44d430 | 6064 | u8 fe_type, bool filter_en) |
46a3df9f | 6065 | { |
d44f9b63 | 6066 | struct hclge_vlan_filter_ctrl_cmd *req; |
46a3df9f S |
6067 | struct hclge_desc desc; |
6068 | int ret; | |
6069 | ||
6070 | hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_FILTER_CTRL, false); | |
6071 | ||
d44f9b63 | 6072 | req = (struct hclge_vlan_filter_ctrl_cmd *)desc.data; |
46a3df9f | 6073 | req->vlan_type = vlan_type; |
0e44d430 | 6074 | req->vlan_fe = filter_en ? fe_type : 0; |
46a3df9f S |
6075 | |
6076 | ret = hclge_cmd_send(&hdev->hw, &desc, 1); | |
90415e85 | 6077 | if (ret) |
46a3df9f S |
6078 | dev_err(&hdev->pdev->dev, "set vlan filter fail, ret =%d.\n", |
6079 | ret); | |
46a3df9f | 6080 | |
90415e85 | 6081 | return ret; |
46a3df9f S |
6082 | } |
6083 | ||
d818396d JS |
6084 | #define HCLGE_FILTER_TYPE_VF 0 |
6085 | #define HCLGE_FILTER_TYPE_PORT 1 | |
0e44d430 ZL |
6086 | #define HCLGE_FILTER_FE_EGRESS_V1_B BIT(0) |
6087 | #define HCLGE_FILTER_FE_NIC_INGRESS_B BIT(0) | |
6088 | #define HCLGE_FILTER_FE_NIC_EGRESS_B BIT(1) | |
6089 | #define HCLGE_FILTER_FE_ROCE_INGRESS_B BIT(2) | |
6090 | #define HCLGE_FILTER_FE_ROCE_EGRESS_B BIT(3) | |
6091 | #define HCLGE_FILTER_FE_EGRESS (HCLGE_FILTER_FE_NIC_EGRESS_B \ | |
6092 | | HCLGE_FILTER_FE_ROCE_EGRESS_B) | |
6093 | #define HCLGE_FILTER_FE_INGRESS (HCLGE_FILTER_FE_NIC_INGRESS_B \ | |
6094 | | HCLGE_FILTER_FE_ROCE_INGRESS_B) | |
d818396d JS |
6095 | |
6096 | static void hclge_enable_vlan_filter(struct hnae3_handle *handle, bool enable) | |
6097 | { | |
6098 | struct hclge_vport *vport = hclge_get_vport(handle); | |
6099 | struct hclge_dev *hdev = vport->back; | |
6100 | ||
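| /* Revision 0x21 and later provide separate NIC/RoCE egress and | |
| * ingress filter enables; older silicon only has the single egress bit. */ | |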
0e44d430 ZL |
6101 | if (hdev->pdev->revision >= 0x21) { |
6102 | hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF, | |
6103 | HCLGE_FILTER_FE_EGRESS, enable); | |
6104 | hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_PORT, | |
6105 | HCLGE_FILTER_FE_INGRESS, enable); | |
6106 | } else { | |
6107 | hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF, | |
6108 | HCLGE_FILTER_FE_EGRESS_V1_B, enable); | |
6109 | } | |
1e3653db JS |
6110 | if (enable) |
6111 | handle->netdev_flags |= HNAE3_VLAN_FLTR; | |
6112 | else | |
6113 | handle->netdev_flags &= ~HNAE3_VLAN_FLTR; | |
d818396d JS |
6114 | } |
6115 | ||
4e66632d YL |
6116 | static int hclge_set_vf_vlan_common(struct hclge_dev *hdev, int vfid, |
6117 | bool is_kill, u16 vlan, u8 qos, | |
6118 | __be16 proto) | |
46a3df9f S |
6119 | { |
6120 | #define HCLGE_MAX_VF_BYTES 16 | |
d44f9b63 YL |
6121 | struct hclge_vlan_filter_vf_cfg_cmd *req0; |
6122 | struct hclge_vlan_filter_vf_cfg_cmd *req1; | |
46a3df9f S |
6123 | struct hclge_desc desc[2]; |
6124 | u8 vf_byte_val; | |
6125 | u8 vf_byte_off; | |
6126 | int ret; | |
6127 | ||
6128 | hclge_cmd_setup_basic_desc(&desc[0], | |
6129 | HCLGE_OPC_VLAN_FILTER_VF_CFG, false); | |
6130 | hclge_cmd_setup_basic_desc(&desc[1], | |
6131 | HCLGE_OPC_VLAN_FILTER_VF_CFG, false); | |
6132 | ||
6133 | desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT); | |
6134 | ||
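| /* Each function owns one bit in the VF bitmap, which spans the two | |
| * descriptors: byte index = vfid / 8, bit within the byte = vfid % 8. */ | |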
6135 | vf_byte_off = vfid / 8; | |
6136 | vf_byte_val = 1 << (vfid % 8); | |
6137 | ||
d44f9b63 YL |
6138 | req0 = (struct hclge_vlan_filter_vf_cfg_cmd *)desc[0].data; |
6139 | req1 = (struct hclge_vlan_filter_vf_cfg_cmd *)desc[1].data; | |
46a3df9f | 6140 | |
a90bb9a5 | 6141 | req0->vlan_id = cpu_to_le16(vlan); |
46a3df9f S |
6142 | req0->vlan_cfg = is_kill; |
6143 | ||
6144 | if (vf_byte_off < HCLGE_MAX_VF_BYTES) | |
6145 | req0->vf_bitmap[vf_byte_off] = vf_byte_val; | |
6146 | else | |
6147 | req1->vf_bitmap[vf_byte_off - HCLGE_MAX_VF_BYTES] = vf_byte_val; | |
6148 | ||
6149 | ret = hclge_cmd_send(&hdev->hw, desc, 2); | |
6150 | if (ret) { | |
6151 | dev_err(&hdev->pdev->dev, | |
6152 | "Send vf vlan command fail, ret =%d.\n", | |
6153 | ret); | |
6154 | return ret; | |
6155 | } | |
6156 | ||
6157 | if (!is_kill) { | |
715d610d | 6158 | #define HCLGE_VF_VLAN_NO_ENTRY 2 |
46a3df9f S |
6159 | if (!req0->resp_code || req0->resp_code == 1) |
6160 | return 0; | |
6161 | ||
715d610d YL |
6162 | if (req0->resp_code == HCLGE_VF_VLAN_NO_ENTRY) { |
6163 | dev_warn(&hdev->pdev->dev, | |
6164 | "vf vlan table is full, vf vlan filter is disabled\n"); | |
6165 | return 0; | |
6166 | } | |
6167 | ||
46a3df9f S |
6168 | dev_err(&hdev->pdev->dev, |
6169 | "Add vf vlan filter fail, ret =%d.\n", | |
6170 | req0->resp_code); | |
6171 | } else { | |
29d3a843 | 6172 | #define HCLGE_VF_VLAN_DEL_NO_FOUND 1 |
46a3df9f S |
6173 | if (!req0->resp_code) |
6174 | return 0; | |
6175 | ||
29d3a843 YL |
6176 | if (req0->resp_code == HCLGE_VF_VLAN_DEL_NO_FOUND) { |
6177 | dev_warn(&hdev->pdev->dev, | |
6178 | "vlan %d filter is not in vf vlan table\n", | |
6179 | vlan); | |
6180 | return 0; | |
6181 | } | |
6182 | ||
46a3df9f S |
6183 | dev_err(&hdev->pdev->dev, |
6184 | "Kill vf vlan filter fail, ret =%d.\n", | |
6185 | req0->resp_code); | |
6186 | } | |
6187 | ||
6188 | return -EIO; | |
6189 | } | |
6190 | ||
4e66632d YL |
6191 | static int hclge_set_port_vlan_filter(struct hclge_dev *hdev, __be16 proto, |
6192 | u16 vlan_id, bool is_kill) | |
46a3df9f | 6193 | { |
d44f9b63 | 6194 | struct hclge_vlan_filter_pf_cfg_cmd *req; |
46a3df9f S |
6195 | struct hclge_desc desc; |
6196 | u8 vlan_offset_byte_val; | |
6197 | u8 vlan_offset_byte; | |
6198 | u8 vlan_offset_160; | |
6199 | int ret; | |
6200 | ||
6201 | hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_FILTER_PF_CFG, false); | |
6202 | ||
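| /* The 4096-entry port VLAN bitmap is written 160 bits per command: pick | |
| * the 160-bit window, then the byte and bit for this vlan_id within it. */ | |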
6203 | vlan_offset_160 = vlan_id / 160; | |
6204 | vlan_offset_byte = (vlan_id % 160) / 8; | |
6205 | vlan_offset_byte_val = 1 << (vlan_id % 8); | |
6206 | ||
d44f9b63 | 6207 | req = (struct hclge_vlan_filter_pf_cfg_cmd *)desc.data; |
46a3df9f S |
6208 | req->vlan_offset = vlan_offset_160; |
6209 | req->vlan_cfg = is_kill; | |
6210 | req->vlan_offset_bitmap[vlan_offset_byte] = vlan_offset_byte_val; | |
6211 | ||
6212 | ret = hclge_cmd_send(&hdev->hw, &desc, 1); | |
4e66632d YL |
6213 | if (ret) |
6214 | dev_err(&hdev->pdev->dev, | |
6215 | "port vlan command, send fail, ret =%d.\n", ret); | |
6216 | return ret; | |
6217 | } | |
6218 | ||
6219 | static int hclge_set_vlan_filter_hw(struct hclge_dev *hdev, __be16 proto, | |
6220 | u16 vport_id, u16 vlan_id, u8 qos, | |
6221 | bool is_kill) | |
6222 | { | |
6223 | u16 vport_idx, vport_num = 0; | |
6224 | int ret; | |
6225 | ||
4935129c YL |
6226 | if (is_kill && !vlan_id) |
6227 | return 0; | |
6228 | ||
4e66632d YL |
6229 | ret = hclge_set_vf_vlan_common(hdev, vport_id, is_kill, vlan_id, |
6230 | 0, proto); | |
46a3df9f S |
6231 | if (ret) { |
6232 | dev_err(&hdev->pdev->dev, | |
4e66632d YL |
6233 | "Set %d vport vlan filter config fail, ret =%d.\n", |
6234 | vport_id, ret); | |
46a3df9f S |
6235 | return ret; |
6236 | } | |
6237 | ||
4e66632d YL |
6238 | /* vlan 0 may be added twice when 8021q module is enabled */ |
6239 | if (!is_kill && !vlan_id && | |
6240 | test_bit(vport_id, hdev->vlan_table[vlan_id])) | |
6241 | return 0; | |
6242 | ||
6243 | if (!is_kill && test_and_set_bit(vport_id, hdev->vlan_table[vlan_id])) { | |
46a3df9f | 6244 | dev_err(&hdev->pdev->dev, |
4e66632d YL |
6245 | "Add port vlan failed, vport %d is already in vlan %d\n", |
6246 | vport_id, vlan_id); | |
6247 | return -EINVAL; | |
46a3df9f S |
6248 | } |
6249 | ||
4e66632d YL |
6250 | if (is_kill && |
6251 | !test_and_clear_bit(vport_id, hdev->vlan_table[vlan_id])) { | |
6252 | dev_err(&hdev->pdev->dev, | |
6253 | "Delete port vlan failed, vport %d is not in vlan %d\n", | |
6254 | vport_id, vlan_id); | |
6255 | return -EINVAL; | |
6256 | } | |
6257 | ||
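| /* Only program the port VLAN filter when the first vport joins this | |
| * VLAN or the last vport leaves it; otherwise the table stays as is. */ | |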
3c6d4f43 | 6258 | for_each_set_bit(vport_idx, hdev->vlan_table[vlan_id], HCLGE_VPORT_NUM) |
4e66632d YL |
6259 | vport_num++; |
6260 | ||
6261 | if ((is_kill && vport_num == 0) || (!is_kill && vport_num == 1)) | |
6262 | ret = hclge_set_port_vlan_filter(hdev, proto, vlan_id, | |
6263 | is_kill); | |
6264 | ||
6265 | return ret; | |
6266 | } | |
6267 | ||
6268 | int hclge_set_vlan_filter(struct hnae3_handle *handle, __be16 proto, | |
6269 | u16 vlan_id, bool is_kill) | |
6270 | { | |
6271 | struct hclge_vport *vport = hclge_get_vport(handle); | |
6272 | struct hclge_dev *hdev = vport->back; | |
6273 | ||
6274 | return hclge_set_vlan_filter_hw(hdev, proto, vport->vport_id, vlan_id, | |
6275 | 0, is_kill); | |
46a3df9f S |
6276 | } |
6277 | ||
6278 | static int hclge_set_vf_vlan_filter(struct hnae3_handle *handle, int vfid, | |
6279 | u16 vlan, u8 qos, __be16 proto) | |
6280 | { | |
6281 | struct hclge_vport *vport = hclge_get_vport(handle); | |
6282 | struct hclge_dev *hdev = vport->back; | |
6283 | ||
6284 | if ((vfid >= hdev->num_alloc_vfs) || (vlan > 4095) || (qos > 7)) | |
6285 | return -EINVAL; | |
6286 | if (proto != htons(ETH_P_8021Q)) | |
6287 | return -EPROTONOSUPPORT; | |
6288 | ||
4e66632d | 6289 | return hclge_set_vlan_filter_hw(hdev, proto, vfid, vlan, qos, false); |
46a3df9f S |
6290 | } |
6291 | ||
e62f2a6b PL |
6292 | static int hclge_set_vlan_tx_offload_cfg(struct hclge_vport *vport) |
6293 | { | |
6294 | struct hclge_tx_vtag_cfg *vcfg = &vport->txvlan_cfg; | |
6295 | struct hclge_vport_vtag_tx_cfg_cmd *req; | |
6296 | struct hclge_dev *hdev = vport->back; | |
6297 | struct hclge_desc desc; | |
6298 | int status; | |
6299 | ||
6300 | hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_PORT_TX_CFG, false); | |
6301 | ||
6302 | req = (struct hclge_vport_vtag_tx_cfg_cmd *)desc.data; | |
6303 | req->def_vlan_tag1 = cpu_to_le16(vcfg->default_tag1); | |
6304 | req->def_vlan_tag2 = cpu_to_le16(vcfg->default_tag2); | |
ccc23ef3 PL |
6305 | hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_TAG1_B, |
6306 | vcfg->accept_tag1 ? 1 : 0); | |
6307 | hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_UNTAG1_B, | |
6308 | vcfg->accept_untag1 ? 1 : 0); | |
6309 | hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_TAG2_B, | |
6310 | vcfg->accept_tag2 ? 1 : 0); | |
6311 | hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_UNTAG2_B, | |
6312 | vcfg->accept_untag2 ? 1 : 0); | |
6313 | hnae3_set_bit(req->vport_vlan_cfg, HCLGE_PORT_INS_TAG1_EN_B, | |
6314 | vcfg->insert_tag1_en ? 1 : 0); | |
6315 | hnae3_set_bit(req->vport_vlan_cfg, HCLGE_PORT_INS_TAG2_EN_B, | |
6316 | vcfg->insert_tag2_en ? 1 : 0); | |
6317 | hnae3_set_bit(req->vport_vlan_cfg, HCLGE_CFG_NIC_ROCE_SEL_B, 0); | |
e62f2a6b PL |
6318 | |
6319 | req->vf_offset = vport->vport_id / HCLGE_VF_NUM_PER_CMD; | |
6320 | req->vf_bitmap[req->vf_offset] = | |
6321 | 1 << (vport->vport_id % HCLGE_VF_NUM_PER_BYTE); | |
6322 | ||
6323 | status = hclge_cmd_send(&hdev->hw, &desc, 1); | |
6324 | if (status) | |
6325 | dev_err(&hdev->pdev->dev, | |
6326 | "Send port txvlan cfg command fail, ret =%d\n", | |
6327 | status); | |
6328 | ||
6329 | return status; | |
6330 | } | |
6331 | ||
6332 | static int hclge_set_vlan_rx_offload_cfg(struct hclge_vport *vport) | |
6333 | { | |
6334 | struct hclge_rx_vtag_cfg *vcfg = &vport->rxvlan_cfg; | |
6335 | struct hclge_vport_vtag_rx_cfg_cmd *req; | |
6336 | struct hclge_dev *hdev = vport->back; | |
6337 | struct hclge_desc desc; | |
6338 | int status; | |
6339 | ||
6340 | hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_PORT_RX_CFG, false); | |
6341 | ||
6342 | req = (struct hclge_vport_vtag_rx_cfg_cmd *)desc.data; | |
ccc23ef3 PL |
6343 | hnae3_set_bit(req->vport_vlan_cfg, HCLGE_REM_TAG1_EN_B, |
6344 | vcfg->strip_tag1_en ? 1 : 0); | |
6345 | hnae3_set_bit(req->vport_vlan_cfg, HCLGE_REM_TAG2_EN_B, | |
6346 | vcfg->strip_tag2_en ? 1 : 0); | |
6347 | hnae3_set_bit(req->vport_vlan_cfg, HCLGE_SHOW_TAG1_EN_B, | |
6348 | vcfg->vlan1_vlan_prionly ? 1 : 0); | |
6349 | hnae3_set_bit(req->vport_vlan_cfg, HCLGE_SHOW_TAG2_EN_B, | |
6350 | vcfg->vlan2_vlan_prionly ? 1 : 0); | |
e62f2a6b PL |
6351 | |
6352 | req->vf_offset = vport->vport_id / HCLGE_VF_NUM_PER_CMD; | |
6353 | req->vf_bitmap[req->vf_offset] = | |
6354 | 1 << (vport->vport_id % HCLGE_VF_NUM_PER_BYTE); | |
6355 | ||
6356 | status = hclge_cmd_send(&hdev->hw, &desc, 1); | |
6357 | if (status) | |
6358 | dev_err(&hdev->pdev->dev, | |
6359 | "Send port rxvlan cfg command fail, ret =%d\n", | |
6360 | status); | |
6361 | ||
6362 | return status; | |
6363 | } | |
6364 | ||
6365 | static int hclge_set_vlan_protocol_type(struct hclge_dev *hdev) | |
6366 | { | |
6367 | struct hclge_rx_vlan_type_cfg_cmd *rx_req; | |
6368 | struct hclge_tx_vlan_type_cfg_cmd *tx_req; | |
6369 | struct hclge_desc desc; | |
6370 | int status; | |
6371 | ||
6372 | hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_TYPE_ID, false); | |
6373 | rx_req = (struct hclge_rx_vlan_type_cfg_cmd *)desc.data; | |
6374 | rx_req->ot_fst_vlan_type = | |
6375 | cpu_to_le16(hdev->vlan_type_cfg.rx_ot_fst_vlan_type); | |
6376 | rx_req->ot_sec_vlan_type = | |
6377 | cpu_to_le16(hdev->vlan_type_cfg.rx_ot_sec_vlan_type); | |
6378 | rx_req->in_fst_vlan_type = | |
6379 | cpu_to_le16(hdev->vlan_type_cfg.rx_in_fst_vlan_type); | |
6380 | rx_req->in_sec_vlan_type = | |
6381 | cpu_to_le16(hdev->vlan_type_cfg.rx_in_sec_vlan_type); | |
6382 | ||
6383 | status = hclge_cmd_send(&hdev->hw, &desc, 1); | |
6384 | if (status) { | |
6385 | dev_err(&hdev->pdev->dev, | |
6386 | "Send rxvlan protocol type command fail, ret =%d\n", | |
6387 | status); | |
6388 | return status; | |
6389 | } | |
6390 | ||
6391 | hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_INSERT, false); | |
6392 | ||
855f03fb | 6393 | tx_req = (struct hclge_tx_vlan_type_cfg_cmd *)desc.data; |
e62f2a6b PL |
6394 | tx_req->ot_vlan_type = cpu_to_le16(hdev->vlan_type_cfg.tx_ot_vlan_type); |
6395 | tx_req->in_vlan_type = cpu_to_le16(hdev->vlan_type_cfg.tx_in_vlan_type); | |
6396 | ||
6397 | status = hclge_cmd_send(&hdev->hw, &desc, 1); | |
6398 | if (status) | |
6399 | dev_err(&hdev->pdev->dev, | |
6400 | "Send txvlan protocol type command fail, ret =%d\n", | |
6401 | status); | |
6402 | ||
6403 | return status; | |
6404 | } | |
6405 | ||
46a3df9f S |
6406 | static int hclge_init_vlan_config(struct hclge_dev *hdev) |
6407 | { | |
e62f2a6b PL |
6408 | #define HCLGE_DEF_VLAN_TYPE 0x8100 |
6409 | ||
1e3653db | 6410 | struct hnae3_handle *handle = &hdev->vport[0].nic; |
e62f2a6b | 6411 | struct hclge_vport *vport; |
46a3df9f | 6412 | int ret; |
e62f2a6b PL |
6413 | int i; |
6414 | ||
0e44d430 ZL |
6415 | if (hdev->pdev->revision >= 0x21) { |
6416 | ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF, | |
6417 | HCLGE_FILTER_FE_EGRESS, true); | |
6418 | if (ret) | |
6419 | return ret; | |
46a3df9f | 6420 | |
0e44d430 ZL |
6421 | ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_PORT, |
6422 | HCLGE_FILTER_FE_INGRESS, true); | |
6423 | if (ret) | |
6424 | return ret; | |
6425 | } else { | |
6426 | ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF, | |
6427 | HCLGE_FILTER_FE_EGRESS_V1_B, | |
6428 | true); | |
6429 | if (ret) | |
6430 | return ret; | |
6431 | } | |
46a3df9f | 6432 | |
1e3653db JS |
6433 | handle->netdev_flags |= HNAE3_VLAN_FLTR; |
6434 | ||
e62f2a6b PL |
6435 | hdev->vlan_type_cfg.rx_in_fst_vlan_type = HCLGE_DEF_VLAN_TYPE; |
6436 | hdev->vlan_type_cfg.rx_in_sec_vlan_type = HCLGE_DEF_VLAN_TYPE; | |
6437 | hdev->vlan_type_cfg.rx_ot_fst_vlan_type = HCLGE_DEF_VLAN_TYPE; | |
6438 | hdev->vlan_type_cfg.rx_ot_sec_vlan_type = HCLGE_DEF_VLAN_TYPE; | |
6439 | hdev->vlan_type_cfg.tx_ot_vlan_type = HCLGE_DEF_VLAN_TYPE; | |
6440 | hdev->vlan_type_cfg.tx_in_vlan_type = HCLGE_DEF_VLAN_TYPE; | |
6441 | ||
6442 | ret = hclge_set_vlan_protocol_type(hdev); | |
5e43aef8 L |
6443 | if (ret) |
6444 | return ret; | |
46a3df9f | 6445 | |
e62f2a6b PL |
6446 | for (i = 0; i < hdev->num_alloc_vport; i++) { |
6447 | vport = &hdev->vport[i]; | |
b75b1a56 PL |
6448 | vport->txvlan_cfg.accept_tag1 = true; |
6449 | vport->txvlan_cfg.accept_untag1 = true; | |
6450 | ||
6451 | /* accept_tag2 and accept_untag2 are not supported on | |
6452 | * pdev revision(0x20); newer revisions support them. On | |
6453 | * revision(0x20) the firmware ignores these two fields and | |
6454 | * does not return an error when the driver sends the command. | |
6455 | * These two fields cannot be configured by the user. | |
6456 | */ | |
6457 | vport->txvlan_cfg.accept_tag2 = true; | |
6458 | vport->txvlan_cfg.accept_untag2 = true; | |
6459 | ||
e62f2a6b PL |
6460 | vport->txvlan_cfg.insert_tag1_en = false; |
6461 | vport->txvlan_cfg.insert_tag2_en = false; | |
6462 | vport->txvlan_cfg.default_tag1 = 0; | |
6463 | vport->txvlan_cfg.default_tag2 = 0; | |
6464 | ||
6465 | ret = hclge_set_vlan_tx_offload_cfg(vport); | |
6466 | if (ret) | |
6467 | return ret; | |
6468 | ||
6469 | vport->rxvlan_cfg.strip_tag1_en = false; | |
6470 | vport->rxvlan_cfg.strip_tag2_en = true; | |
6471 | vport->rxvlan_cfg.vlan1_vlan_prionly = false; | |
6472 | vport->rxvlan_cfg.vlan2_vlan_prionly = false; | |
6473 | ||
6474 | ret = hclge_set_vlan_rx_offload_cfg(vport); | |
6475 | if (ret) | |
6476 | return ret; | |
6477 | } | |
6478 | ||
4e66632d | 6479 | return hclge_set_vlan_filter(handle, htons(ETH_P_8021Q), 0, false); |
46a3df9f S |
6480 | } |
6481 | ||
3849d494 | 6482 | int hclge_en_hw_strip_rxvtag(struct hnae3_handle *handle, bool enable) |
5f9a7732 PL |
6483 | { |
6484 | struct hclge_vport *vport = hclge_get_vport(handle); | |
6485 | ||
6486 | vport->rxvlan_cfg.strip_tag1_en = false; | |
6487 | vport->rxvlan_cfg.strip_tag2_en = enable; | |
6488 | vport->rxvlan_cfg.vlan1_vlan_prionly = false; | |
6489 | vport->rxvlan_cfg.vlan2_vlan_prionly = false; | |
6490 | ||
6491 | return hclge_set_vlan_rx_offload_cfg(vport); | |
6492 | } | |
6493 | ||
4ee09281 | 6494 | static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mps) |
46a3df9f | 6495 | { |
d44f9b63 | 6496 | struct hclge_config_max_frm_size_cmd *req; |
46a3df9f | 6497 | struct hclge_desc desc; |
46a3df9f | 6498 | |
46a3df9f S |
6499 | hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAX_FRM_SIZE, false); |
6500 | ||
d44f9b63 | 6501 | req = (struct hclge_config_max_frm_size_cmd *)desc.data; |
4ee09281 | 6502 | req->max_frm_size = cpu_to_le16(new_mps); |
b86fdbf3 | 6503 | req->min_frm_size = HCLGE_MAC_MIN_FRAME; |
46a3df9f | 6504 | |
4ee09281 | 6505 | return hclge_cmd_send(&hdev->hw, &desc, 1); |
46a3df9f S |
6506 | } |
6507 | ||
12341881 FL |
6508 | static int hclge_set_mtu(struct hnae3_handle *handle, int new_mtu) |
6509 | { | |
6510 | struct hclge_vport *vport = hclge_get_vport(handle); | |
b2c04029 YL |
6511 | |
6512 | return hclge_set_vport_mtu(vport, new_mtu); | |
6513 | } | |
6514 | ||
6515 | int hclge_set_vport_mtu(struct hclge_vport *vport, int new_mtu) | |
6516 | { | |
12341881 | 6517 | struct hclge_dev *hdev = vport->back; |
b2c04029 | 6518 | int i, max_frm_size, ret = 0; |
12341881 | 6519 | |
4ee09281 YL |
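| /* The MAC frame size is the MTU plus Ethernet header, FCS and room for two VLAN tags. */ | |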
6520 | max_frm_size = new_mtu + ETH_HLEN + ETH_FCS_LEN + 2 * VLAN_HLEN; |
6521 | if (max_frm_size < HCLGE_MAC_MIN_FRAME || | |
6522 | max_frm_size > HCLGE_MAC_MAX_FRAME) | |
6523 | return -EINVAL; | |
6524 | ||
b2c04029 YL |
6525 | max_frm_size = max(max_frm_size, HCLGE_MAC_DEFAULT_FRAME); |
6526 | mutex_lock(&hdev->vport_lock); | |
6527 | /* VF's mps must fit within hdev->mps */ | |
6528 | if (vport->vport_id && max_frm_size > hdev->mps) { | |
6529 | mutex_unlock(&hdev->vport_lock); | |
6530 | return -EINVAL; | |
6531 | } else if (vport->vport_id) { | |
6532 | vport->mps = max_frm_size; | |
6533 | mutex_unlock(&hdev->vport_lock); | |
6534 | return 0; | |
6535 | } | |
6536 | ||
6537 | /* PF's mps must not be less than any VF's mps */ | |
6538 | for (i = 1; i < hdev->num_alloc_vport; i++) | |
6539 | if (max_frm_size < hdev->vport[i].mps) { | |
6540 | mutex_unlock(&hdev->vport_lock); | |
6541 | return -EINVAL; | |
6542 | } | |
6543 | ||
268868f8 YL |
6544 | hclge_notify_client(hdev, HNAE3_DOWN_CLIENT); |
6545 | ||
4ee09281 | 6546 | ret = hclge_set_mac_mtu(hdev, max_frm_size); |
12341881 FL |
6547 | if (ret) { |
6548 | dev_err(&hdev->pdev->dev, | |
6549 | "Change mtu fail, ret =%d\n", ret); | |
b2c04029 | 6550 | goto out; |
12341881 FL |
6551 | } |
6552 | ||
4ee09281 | 6553 | hdev->mps = max_frm_size; |
b2c04029 | 6554 | vport->mps = max_frm_size; |
4ee09281 | 6555 | |
12341881 FL |
6556 | ret = hclge_buffer_alloc(hdev); |
6557 | if (ret) | |
6558 | dev_err(&hdev->pdev->dev, | |
6559 | "Allocate buffer fail, ret =%d\n", ret); | |
6560 | ||
b2c04029 | 6561 | out: |
268868f8 | 6562 | hclge_notify_client(hdev, HNAE3_UP_CLIENT); |
b2c04029 | 6563 | mutex_unlock(&hdev->vport_lock); |
12341881 FL |
6564 | return ret; |
6565 | } | |
6566 | ||
46a3df9f S |
6567 | static int hclge_send_reset_tqp_cmd(struct hclge_dev *hdev, u16 queue_id, |
6568 | bool enable) | |
6569 | { | |
d44f9b63 | 6570 | struct hclge_reset_tqp_queue_cmd *req; |
46a3df9f S |
6571 | struct hclge_desc desc; |
6572 | int ret; | |
6573 | ||
6574 | hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RESET_TQP_QUEUE, false); | |
6575 | ||
d44f9b63 | 6576 | req = (struct hclge_reset_tqp_queue_cmd *)desc.data; |
46a3df9f | 6577 | req->tqp_id = cpu_to_le16(queue_id & HCLGE_RING_ID_MASK); |
ccc23ef3 | 6578 | hnae3_set_bit(req->reset_req, HCLGE_TQP_RESET_B, enable); |
46a3df9f S |
6579 | |
6580 | ret = hclge_cmd_send(&hdev->hw, &desc, 1); | |
6581 | if (ret) { | |
6582 | dev_err(&hdev->pdev->dev, | |
6583 | "Send tqp reset cmd error, status =%d\n", ret); | |
6584 | return ret; | |
6585 | } | |
6586 | ||
6587 | return 0; | |
6588 | } | |
6589 | ||
6590 | static int hclge_get_reset_status(struct hclge_dev *hdev, u16 queue_id) | |
6591 | { | |
d44f9b63 | 6592 | struct hclge_reset_tqp_queue_cmd *req; |
46a3df9f S |
6593 | struct hclge_desc desc; |
6594 | int ret; | |
6595 | ||
6596 | hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RESET_TQP_QUEUE, true); | |
6597 | ||
d44f9b63 | 6598 | req = (struct hclge_reset_tqp_queue_cmd *)desc.data; |
46a3df9f S |
6599 | req->tqp_id = cpu_to_le16(queue_id & HCLGE_RING_ID_MASK); |
6600 | ||
6601 | ret = hclge_cmd_send(&hdev->hw, &desc, 1); | |
6602 | if (ret) { | |
6603 | dev_err(&hdev->pdev->dev, | |
6604 | "Get reset status error, status =%d\n", ret); | |
6605 | return ret; | |
6606 | } | |
6607 | ||
ccc23ef3 | 6608 | return hnae3_get_bit(req->ready_to_reset, HCLGE_TQP_RESET_B); |
46a3df9f S |
6609 | } |
6610 | ||
e5e89cda PL |
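| /* Translate a queue id local to this handle into the device-global TQP index. */ | |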
6611 | static u16 hclge_covert_handle_qid_global(struct hnae3_handle *handle, |
6612 | u16 queue_id) | |
6613 | { | |
6614 | struct hnae3_queue *queue; | |
6615 | struct hclge_tqp *tqp; | |
6616 | ||
6617 | queue = handle->kinfo.tqp[queue_id]; | |
6618 | tqp = container_of(queue, struct hclge_tqp, q); | |
6619 | ||
6620 | return tqp->index; | |
6621 | } | |
6622 | ||
abe62a63 | 6623 | int hclge_reset_tqp(struct hnae3_handle *handle, u16 queue_id) |
46a3df9f S |
6624 | { |
6625 | struct hclge_vport *vport = hclge_get_vport(handle); | |
6626 | struct hclge_dev *hdev = vport->back; | |
6627 | int reset_try_times = 0; | |
6628 | int reset_status; | |
e5e89cda | 6629 | u16 queue_gid; |
abe62a63 | 6630 | int ret = 0; |
46a3df9f | 6631 | |
e5e89cda PL |
6632 | queue_gid = hclge_covert_handle_qid_global(handle, queue_id); |
6633 | ||
46a3df9f S |
6634 | ret = hclge_tqp_enable(hdev, queue_id, 0, false); |
6635 | if (ret) { | |
abe62a63 HT |
6636 | dev_err(&hdev->pdev->dev, "Disable tqp fail, ret = %d\n", ret); |
6637 | return ret; | |
46a3df9f S |
6638 | } |
6639 | ||
e5e89cda | 6640 | ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, true); |
46a3df9f | 6641 | if (ret) { |
abe62a63 HT |
6642 | dev_err(&hdev->pdev->dev, |
6643 | "Send reset tqp cmd fail, ret = %d\n", ret); | |
6644 | return ret; | |
46a3df9f S |
6645 | } |
6646 | ||
6647 | reset_try_times = 0; | |
6648 | while (reset_try_times++ < HCLGE_TQP_RESET_TRY_TIMES) { | |
6649 | /* Wait for tqp hw reset */ | |
6650 | msleep(20); | |
e5e89cda | 6651 | reset_status = hclge_get_reset_status(hdev, queue_gid); |
46a3df9f S |
6652 | if (reset_status) |
6653 | break; | |
6654 | } | |
6655 | ||
6656 | if (reset_try_times >= HCLGE_TQP_RESET_TRY_TIMES) { | |
abe62a63 HT |
6657 | dev_err(&hdev->pdev->dev, "Reset TQP fail\n"); |
6658 | return ret; | |
46a3df9f S |
6659 | } |
6660 | ||
e5e89cda | 6661 | ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, false); |
abe62a63 HT |
6662 | if (ret) |
6663 | dev_err(&hdev->pdev->dev, | |
6664 | "Deassert the soft reset fail, ret = %d\n", ret); | |
6665 | ||
6666 | return ret; | |
46a3df9f S |
6667 | } |
6668 | ||
d3ea7fc4 PL |
6669 | void hclge_reset_vf_queue(struct hclge_vport *vport, u16 queue_id) |
6670 | { | |
6671 | struct hclge_dev *hdev = vport->back; | |
6672 | int reset_try_times = 0; | |
6673 | int reset_status; | |
6674 | u16 queue_gid; | |
6675 | int ret; | |
6676 | ||
6677 | queue_gid = hclge_covert_handle_qid_global(&vport->nic, queue_id); | |
6678 | ||
6679 | ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, true); | |
6680 | if (ret) { | |
6681 | dev_warn(&hdev->pdev->dev, | |
6682 | "Send reset tqp cmd fail, ret = %d\n", ret); | |
6683 | return; | |
6684 | } | |
6685 | ||
6686 | reset_try_times = 0; | |
6687 | while (reset_try_times++ < HCLGE_TQP_RESET_TRY_TIMES) { | |
6688 | /* Wait for tqp hw reset */ | |
6689 | msleep(20); | |
6690 | reset_status = hclge_get_reset_status(hdev, queue_gid); | |
6691 | if (reset_status) | |
6692 | break; | |
6693 | } | |
6694 | ||
6695 | if (reset_try_times >= HCLGE_TQP_RESET_TRY_TIMES) { | |
6696 | dev_warn(&hdev->pdev->dev, "Reset TQP fail\n"); | |
6697 | return; | |
6698 | } | |
6699 | ||
6700 | ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, false); | |
6701 | if (ret) | |
6702 | dev_warn(&hdev->pdev->dev, | |
6703 | "Deassert the soft reset fail, ret = %d\n", ret); | |
6704 | } | |
6705 | ||
46a3df9f S |
6706 | static u32 hclge_get_fw_version(struct hnae3_handle *handle) |
6707 | { | |
6708 | struct hclge_vport *vport = hclge_get_vport(handle); | |
6709 | struct hclge_dev *hdev = vport->back; | |
6710 | ||
6711 | return hdev->fw_version; | |
6712 | } | |
6713 | ||
09ea401e PL |
6714 | static void hclge_set_flowctrl_adv(struct hclge_dev *hdev, u32 rx_en, u32 tx_en) |
6715 | { | |
6716 | struct phy_device *phydev = hdev->hw.mac.phydev; | |
6717 | ||
6718 | if (!phydev) | |
6719 | return; | |
6720 | ||
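| /* Encode the requested rx/tx pause into the standard advertisement bits: | |
| * rx only -> Pause | Asym_Pause, rx and tx -> Pause, tx only -> Asym_Pause. */ | |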
6721 | phydev->advertising &= ~(ADVERTISED_Pause | ADVERTISED_Asym_Pause); | |
6722 | ||
6723 | if (rx_en) | |
6724 | phydev->advertising |= ADVERTISED_Pause | ADVERTISED_Asym_Pause; | |
6725 | ||
6726 | if (tx_en) | |
6727 | phydev->advertising ^= ADVERTISED_Asym_Pause; | |
6728 | } | |
6729 | ||
6730 | static int hclge_cfg_pauseparam(struct hclge_dev *hdev, u32 rx_en, u32 tx_en) | |
6731 | { | |
09ea401e PL |
6732 | int ret; |
6733 | ||
6734 | if (rx_en && tx_en) | |
7a28a82a | 6735 | hdev->fc_mode_last_time = HCLGE_FC_FULL; |
09ea401e | 6736 | else if (rx_en && !tx_en) |
7a28a82a | 6737 | hdev->fc_mode_last_time = HCLGE_FC_RX_PAUSE; |
09ea401e | 6738 | else if (!rx_en && tx_en) |
7a28a82a | 6739 | hdev->fc_mode_last_time = HCLGE_FC_TX_PAUSE; |
09ea401e | 6740 | else |
7a28a82a | 6741 | hdev->fc_mode_last_time = HCLGE_FC_NONE; |
09ea401e | 6742 | |
7a28a82a | 6743 | if (hdev->tm_info.fc_mode == HCLGE_FC_PFC) |
09ea401e | 6744 | return 0; |
09ea401e PL |
6745 | |
6746 | ret = hclge_mac_pause_en_cfg(hdev, tx_en, rx_en); | |
6747 | if (ret) { | |
6748 | dev_err(&hdev->pdev->dev, "configure pauseparam error, ret = %d.\n", | |
6749 | ret); | |
6750 | return ret; | |
6751 | } | |
6752 | ||
7a28a82a | 6753 | hdev->tm_info.fc_mode = hdev->fc_mode_last_time; |
09ea401e PL |
6754 | |
6755 | return 0; | |
6756 | } | |
6757 | ||
6282f2ea PL |
6758 | int hclge_cfg_flowctrl(struct hclge_dev *hdev) |
6759 | { | |
6760 | struct phy_device *phydev = hdev->hw.mac.phydev; | |
6761 | u16 remote_advertising = 0; | |
6762 | u16 local_advertising = 0; | |
6763 | u32 rx_pause, tx_pause; | |
6764 | u8 flowctl; | |
6765 | ||
6766 | if (!phydev->link || !phydev->autoneg) | |
6767 | return 0; | |
6768 | ||
6769 | if (phydev->advertising & ADVERTISED_Pause) | |
6770 | local_advertising = ADVERTISE_PAUSE_CAP; | |
6771 | ||
6772 | if (phydev->advertising & ADVERTISED_Asym_Pause) | |
6773 | local_advertising |= ADVERTISE_PAUSE_ASYM; | |
6774 | ||
6775 | if (phydev->pause) | |
6776 | remote_advertising = LPA_PAUSE_CAP; | |
6777 | ||
6778 | if (phydev->asym_pause) | |
6779 | remote_advertising |= LPA_PAUSE_ASYM; | |
6780 | ||
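| /* Resolve the tx/rx pause result from the local and link-partner advertisements. */ | |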
6781 | flowctl = mii_resolve_flowctrl_fdx(local_advertising, | |
6782 | remote_advertising); | |
6783 | tx_pause = flowctl & FLOW_CTRL_TX; | |
6784 | rx_pause = flowctl & FLOW_CTRL_RX; | |
6785 | ||
6786 | if (phydev->duplex == HCLGE_MAC_HALF) { | |
6787 | tx_pause = 0; | |
6788 | rx_pause = 0; | |
6789 | } | |
6790 | ||
6791 | return hclge_cfg_pauseparam(hdev, rx_pause, tx_pause); | |
6792 | } | |
6793 | ||
46a3df9f S |
6794 | static void hclge_get_pauseparam(struct hnae3_handle *handle, u32 *auto_neg, |
6795 | u32 *rx_en, u32 *tx_en) | |
6796 | { | |
6797 | struct hclge_vport *vport = hclge_get_vport(handle); | |
6798 | struct hclge_dev *hdev = vport->back; | |
6799 | ||
6800 | *auto_neg = hclge_get_autoneg(handle); | |
6801 | ||
6802 | if (hdev->tm_info.fc_mode == HCLGE_FC_PFC) { | |
6803 | *rx_en = 0; | |
6804 | *tx_en = 0; | |
6805 | return; | |
6806 | } | |
6807 | ||
6808 | if (hdev->tm_info.fc_mode == HCLGE_FC_RX_PAUSE) { | |
6809 | *rx_en = 1; | |
6810 | *tx_en = 0; | |
6811 | } else if (hdev->tm_info.fc_mode == HCLGE_FC_TX_PAUSE) { | |
6812 | *tx_en = 1; | |
6813 | *rx_en = 0; | |
6814 | } else if (hdev->tm_info.fc_mode == HCLGE_FC_FULL) { | |
6815 | *rx_en = 1; | |
6816 | *tx_en = 1; | |
6817 | } else { | |
6818 | *rx_en = 0; | |
6819 | *tx_en = 0; | |
6820 | } | |
6821 | } | |
6822 | ||
09ea401e PL |
6823 | static int hclge_set_pauseparam(struct hnae3_handle *handle, u32 auto_neg, |
6824 | u32 rx_en, u32 tx_en) | |
6825 | { | |
6826 | struct hclge_vport *vport = hclge_get_vport(handle); | |
6827 | struct hclge_dev *hdev = vport->back; | |
6828 | struct phy_device *phydev = hdev->hw.mac.phydev; | |
6829 | u32 fc_autoneg; | |
6830 | ||
09ea401e PL |
6831 | fc_autoneg = hclge_get_autoneg(handle); |
6832 | if (auto_neg != fc_autoneg) { | |
6833 | dev_info(&hdev->pdev->dev, | |
6834 | "To change autoneg please use: ethtool -s <dev> autoneg <on|off>\n"); | |
6835 | return -EOPNOTSUPP; | |
6836 | } | |
6837 | ||
6838 | if (hdev->tm_info.fc_mode == HCLGE_FC_PFC) { | |
6839 | dev_info(&hdev->pdev->dev, | |
6840 | "Priority flow control enabled. Cannot set link flow control.\n"); | |
6841 | return -EOPNOTSUPP; | |
6842 | } | |
6843 | ||
6844 | hclge_set_flowctrl_adv(hdev, rx_en, tx_en); | |
6845 | ||
6846 | if (!fc_autoneg) | |
6847 | return hclge_cfg_pauseparam(hdev, rx_en, tx_en); | |
6848 | ||
bef24782 FL |
6849 | /* Only support flow control negotiation for netdev with |
6850 | * phy attached for now. | |
6851 | */ | |
6852 | if (!phydev) | |
6853 | return -EOPNOTSUPP; | |
6854 | ||
09ea401e PL |
6855 | return phy_start_aneg(phydev); |
6856 | } | |
6857 | ||
46a3df9f S |
6858 | static void hclge_get_ksettings_an_result(struct hnae3_handle *handle, |
6859 | u8 *auto_neg, u32 *speed, u8 *duplex) | |
6860 | { | |
6861 | struct hclge_vport *vport = hclge_get_vport(handle); | |
6862 | struct hclge_dev *hdev = vport->back; | |
6863 | ||
6864 | if (speed) | |
6865 | *speed = hdev->hw.mac.speed; | |
6866 | if (duplex) | |
6867 | *duplex = hdev->hw.mac.duplex; | |
6868 | if (auto_neg) | |
6869 | *auto_neg = hdev->hw.mac.autoneg; | |
6870 | } | |
6871 | ||
6872 | static void hclge_get_media_type(struct hnae3_handle *handle, u8 *media_type) | |
6873 | { | |
6874 | struct hclge_vport *vport = hclge_get_vport(handle); | |
6875 | struct hclge_dev *hdev = vport->back; | |
6876 | ||
6877 | if (media_type) | |
6878 | *media_type = hdev->hw.mac.media_type; | |
6879 | } | |
6880 | ||
6881 | static void hclge_get_mdix_mode(struct hnae3_handle *handle, | |
6882 | u8 *tp_mdix_ctrl, u8 *tp_mdix) | |
6883 | { | |
6884 | struct hclge_vport *vport = hclge_get_vport(handle); | |
6885 | struct hclge_dev *hdev = vport->back; | |
6886 | struct phy_device *phydev = hdev->hw.mac.phydev; | |
6887 | int mdix_ctrl, mdix, retval, is_resolved; | |
6888 | ||
6889 | if (!phydev) { | |
6890 | *tp_mdix_ctrl = ETH_TP_MDI_INVALID; | |
6891 | *tp_mdix = ETH_TP_MDI_INVALID; | |
6892 | return; | |
6893 | } | |
6894 | ||
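| /* Switch the PHY to the MDI/MDI-X page, read the control and status | |
| * registers, then restore the copper page before decoding the result. */ | |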
6895 | phy_write(phydev, HCLGE_PHY_PAGE_REG, HCLGE_PHY_PAGE_MDIX); | |
6896 | ||
6897 | retval = phy_read(phydev, HCLGE_PHY_CSC_REG); | |
ccc23ef3 PL |
6898 | mdix_ctrl = hnae3_get_field(retval, HCLGE_PHY_MDIX_CTRL_M, |
6899 | HCLGE_PHY_MDIX_CTRL_S); | |
46a3df9f S |
6900 | |
6901 | retval = phy_read(phydev, HCLGE_PHY_CSS_REG); | |
ccc23ef3 PL |
6902 | mdix = hnae3_get_bit(retval, HCLGE_PHY_MDIX_STATUS_B); |
6903 | is_resolved = hnae3_get_bit(retval, HCLGE_PHY_SPEED_DUP_RESOLVE_B); | |
46a3df9f S |
6904 | |
6905 | phy_write(phydev, HCLGE_PHY_PAGE_REG, HCLGE_PHY_PAGE_COPPER); | |
6906 | ||
6907 | switch (mdix_ctrl) { | |
6908 | case 0x0: | |
6909 | *tp_mdix_ctrl = ETH_TP_MDI; | |
6910 | break; | |
6911 | case 0x1: | |
6912 | *tp_mdix_ctrl = ETH_TP_MDI_X; | |
6913 | break; | |
6914 | case 0x3: | |
6915 | *tp_mdix_ctrl = ETH_TP_MDI_AUTO; | |
6916 | break; | |
6917 | default: | |
6918 | *tp_mdix_ctrl = ETH_TP_MDI_INVALID; | |
6919 | break; | |
6920 | } | |
6921 | ||
6922 | if (!is_resolved) | |
6923 | *tp_mdix = ETH_TP_MDI_INVALID; | |
6924 | else if (mdix) | |
6925 | *tp_mdix = ETH_TP_MDI_X; | |
6926 | else | |
6927 | *tp_mdix = ETH_TP_MDI; | |
6928 | } | |
6929 | ||
dda6b7d5 FL |
6930 | static int hclge_init_instance_hw(struct hclge_dev *hdev) |
6931 | { | |
6932 | return hclge_mac_connect_phy(hdev); | |
6933 | } | |
6934 | ||
6935 | static void hclge_uninit_instance_hw(struct hclge_dev *hdev) | |
6936 | { | |
6937 | hclge_mac_disconnect_phy(hdev); | |
6938 | } | |
6939 | ||
46a3df9f S |
6940 | static int hclge_init_client_instance(struct hnae3_client *client, |
6941 | struct hnae3_ae_dev *ae_dev) | |
6942 | { | |
6943 | struct hclge_dev *hdev = ae_dev->priv; | |
6944 | struct hclge_vport *vport; | |
6945 | int i, ret; | |
6946 | ||
6947 | for (i = 0; i < hdev->num_vmdq_vport + 1; i++) { | |
6948 | vport = &hdev->vport[i]; | |
6949 | ||
6950 | switch (client->type) { | |
6951 | case HNAE3_CLIENT_KNIC: | |
6952 | ||
6953 | hdev->nic_client = client; | |
6954 | vport->nic.client = client; | |
6955 | ret = client->ops->init_instance(&vport->nic); | |
6956 | if (ret) | |
2f59de78 | 6957 | goto clear_nic; |
46a3df9f | 6958 | |
dda6b7d5 FL |
6959 | ret = hclge_init_instance_hw(hdev); |
6960 | if (ret) { | |
6961 | client->ops->uninit_instance(&vport->nic, | |
6962 | 0); | |
2f59de78 | 6963 | goto clear_nic; |
dda6b7d5 FL |
6964 | } |
6965 | ||
8ed41eeb JS |
6966 | hnae3_set_client_init_flag(client, ae_dev, 1); |
6967 | ||
46a3df9f | 6968 | if (hdev->roce_client && |
e92a0843 | 6969 | hnae3_dev_roce_supported(hdev)) { |
46a3df9f S |
6970 | struct hnae3_client *rc = hdev->roce_client; |
6971 | ||
6972 | ret = hclge_init_roce_base_info(vport); | |
6973 | if (ret) | |
2f59de78 | 6974 | goto clear_roce; |
46a3df9f S |
6975 | |
6976 | ret = rc->ops->init_instance(&vport->roce); | |
6977 | if (ret) | |
2f59de78 | 6978 | goto clear_roce; |
8ed41eeb JS |
6979 | |
6980 | hnae3_set_client_init_flag(hdev->roce_client, | |
6981 | ae_dev, 1); | |
46a3df9f S |
6982 | } |
6983 | ||
6984 | break; | |
6985 | case HNAE3_CLIENT_UNIC: | |
6986 | hdev->nic_client = client; | |
6987 | vport->nic.client = client; | |
6988 | ||
6989 | ret = client->ops->init_instance(&vport->nic); | |
6990 | if (ret) | |
2f59de78 | 6991 | goto clear_nic; |
46a3df9f | 6992 | |
8ed41eeb JS |
6993 | hnae3_set_client_init_flag(client, ae_dev, 1); |
6994 | ||
46a3df9f S |
6995 | break; |
6996 | case HNAE3_CLIENT_ROCE: | |
e92a0843 | 6997 | if (hnae3_dev_roce_supported(hdev)) { |
46a3df9f S |
6998 | hdev->roce_client = client; |
6999 | vport->roce.client = client; | |
7000 | } | |
7001 | ||
3a46f34d | 7002 | if (hdev->roce_client && hdev->nic_client) { |
46a3df9f S |
7003 | ret = hclge_init_roce_base_info(vport); |
7004 | if (ret) | |
2f59de78 | 7005 | goto clear_roce; |
46a3df9f S |
7006 | |
7007 | ret = client->ops->init_instance(&vport->roce); | |
7008 | if (ret) | |
2f59de78 | 7009 | goto clear_roce; |
8ed41eeb JS |
7010 | |
7011 | hnae3_set_client_init_flag(client, ae_dev, 1); | |
46a3df9f | 7012 | } |
085920ba JS |
7013 | |
7014 | break; | |
7015 | default: | |
7016 | return -EINVAL; | |
46a3df9f S |
7017 | } |
7018 | } | |
7019 | ||
7020 | return 0; | |
2f59de78 JS |
7021 | |
7022 | clear_nic: | |
7023 | hdev->nic_client = NULL; | |
7024 | vport->nic.client = NULL; | |
7025 | return ret; | |
7026 | clear_roce: | |
7027 | hdev->roce_client = NULL; | |
7028 | vport->roce.client = NULL; | |
7029 | return ret; | |
46a3df9f S |
7030 | } |
7031 | ||
7032 | static void hclge_uninit_client_instance(struct hnae3_client *client, | |
7033 | struct hnae3_ae_dev *ae_dev) | |
7034 | { | |
7035 | struct hclge_dev *hdev = ae_dev->priv; | |
7036 | struct hclge_vport *vport; | |
7037 | int i; | |
7038 | ||
7039 | for (i = 0; i < hdev->num_vmdq_vport + 1; i++) { | |
7040 | vport = &hdev->vport[i]; | |
a17dcf3f | 7041 | if (hdev->roce_client) { |
46a3df9f S |
7042 | hdev->roce_client->ops->uninit_instance(&vport->roce, |
7043 | 0); | |
a17dcf3f L |
7044 | hdev->roce_client = NULL; |
7045 | vport->roce.client = NULL; | |
7046 | } | |
46a3df9f S |
7047 | if (client->type == HNAE3_CLIENT_ROCE) |
7048 | return; | |
2f59de78 | 7049 | if (hdev->nic_client && client->ops->uninit_instance) { |
dda6b7d5 | 7050 | hclge_uninit_instance_hw(hdev); |
46a3df9f | 7051 | client->ops->uninit_instance(&vport->nic, 0); |
a17dcf3f L |
7052 | hdev->nic_client = NULL; |
7053 | vport->nic.client = NULL; | |
7054 | } | |
46a3df9f S |
7055 | } |
7056 | } | |
7057 | ||
7058 | static int hclge_pci_init(struct hclge_dev *hdev) | |
7059 | { | |
7060 | struct pci_dev *pdev = hdev->pdev; | |
7061 | struct hclge_hw *hw; | |
7062 | int ret; | |
7063 | ||
7064 | ret = pci_enable_device(pdev); | |
7065 | if (ret) { | |
7066 | dev_err(&pdev->dev, "failed to enable PCI device\n"); | |
6c46284e | 7067 | return ret; |
46a3df9f S |
7068 | } |
7069 | ||
7070 | ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); | |
7071 | if (ret) { | |
7072 | ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)); | |
7073 | if (ret) { | |
7074 | dev_err(&pdev->dev, | |
7075 | "can't set consistent PCI DMA"); | |
7076 | goto err_disable_device; | |
7077 | } | |
7078 | dev_warn(&pdev->dev, "set DMA mask to 32 bits\n"); | |
7079 | } | |
7080 | ||
7081 | ret = pci_request_regions(pdev, HCLGE_DRIVER_NAME); | |
7082 | if (ret) { | |
7083 | dev_err(&pdev->dev, "PCI request regions failed %d\n", ret); | |
7084 | goto err_disable_device; | |
7085 | } | |
7086 | ||
7087 | pci_set_master(pdev); | |
7088 | hw = &hdev->hw; | |
46a3df9f S |
7089 | hw->io_base = pcim_iomap(pdev, 2, 0); |
7090 | if (!hw->io_base) { | |
7091 | dev_err(&pdev->dev, "Can't map configuration register space\n"); | |
7092 | ret = -ENOMEM; | |
7093 | goto err_clr_master; | |
7094 | } | |
7095 | ||
709eb41a L |
7096 | hdev->num_req_vfs = pci_sriov_get_totalvfs(pdev); |
7097 | ||
46a3df9f S |
7098 | return 0; |
7099 | err_clr_master: | |
7100 | pci_clear_master(pdev); | |
7101 | pci_release_regions(pdev); | |
7102 | err_disable_device: | |
7103 | pci_disable_device(pdev); | |
46a3df9f S |
7104 | |
7105 | return ret; | |
7106 | } | |
7107 | ||
7108 | static void hclge_pci_uninit(struct hclge_dev *hdev) | |
7109 | { | |
7110 | struct pci_dev *pdev = hdev->pdev; | |
7111 | ||
7d6d639b | 7112 | pcim_iounmap(pdev, hdev->hw.io_base); |
887c3820 | 7113 | pci_free_irq_vectors(pdev); |
46a3df9f S |
7114 | pci_clear_master(pdev); |
7115 | pci_release_mem_regions(pdev); | |
7116 | pci_disable_device(pdev); | |
7117 | } | |
7118 | ||
2ec3d9f0 PL |
7119 | static void hclge_state_init(struct hclge_dev *hdev) |
7120 | { | |
7121 | set_bit(HCLGE_STATE_SERVICE_INITED, &hdev->state); | |
7122 | set_bit(HCLGE_STATE_DOWN, &hdev->state); | |
7123 | clear_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state); | |
7124 | clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state); | |
7125 | clear_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state); | |
7126 | clear_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state); | |
7127 | } | |
7128 | ||
7129 | static void hclge_state_uninit(struct hclge_dev *hdev) | |
7130 | { | |
7131 | set_bit(HCLGE_STATE_DOWN, &hdev->state); | |
7132 | ||
7133 | if (hdev->service_timer.function) | |
7134 | del_timer_sync(&hdev->service_timer); | |
1afdb53a HT |
7135 | if (hdev->reset_timer.function) |
7136 | del_timer_sync(&hdev->reset_timer); | |
2ec3d9f0 PL |
7137 | if (hdev->service_task.func) |
7138 | cancel_work_sync(&hdev->service_task); | |
7139 | if (hdev->rst_service_task.func) | |
7140 | cancel_work_sync(&hdev->rst_service_task); | |
7141 | if (hdev->mbx_service_task.func) | |
7142 | cancel_work_sync(&hdev->mbx_service_task); | |
7143 | } | |
7144 | ||
26977990 HT |
7145 | static void hclge_flr_prepare(struct hnae3_ae_dev *ae_dev) |
7146 | { | |
7147 | #define HCLGE_FLR_WAIT_MS 100 | |
7148 | #define HCLGE_FLR_WAIT_CNT 50 | |
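| /* Wait up to HCLGE_FLR_WAIT_CNT * HCLGE_FLR_WAIT_MS = 5 seconds for the down phase to finish. */ | |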
7149 | struct hclge_dev *hdev = ae_dev->priv; | |
7150 | int cnt = 0; | |
7151 | ||
7152 | clear_bit(HNAE3_FLR_DOWN, &hdev->flr_state); | |
7153 | clear_bit(HNAE3_FLR_DONE, &hdev->flr_state); | |
7154 | set_bit(HNAE3_FLR_RESET, &hdev->default_reset_request); | |
7155 | hclge_reset_event(hdev->pdev, NULL); | |
7156 | ||
7157 | while (!test_bit(HNAE3_FLR_DOWN, &hdev->flr_state) && | |
7158 | cnt++ < HCLGE_FLR_WAIT_CNT) | |
7159 | msleep(HCLGE_FLR_WAIT_MS); | |
7160 | ||
7161 | if (!test_bit(HNAE3_FLR_DOWN, &hdev->flr_state)) | |
7162 | dev_err(&hdev->pdev->dev, | |
7163 | "flr wait down timeout: %d\n", cnt); | |
7164 | } | |
7165 | ||
7166 | static void hclge_flr_done(struct hnae3_ae_dev *ae_dev) | |
7167 | { | |
7168 | struct hclge_dev *hdev = ae_dev->priv; | |
7169 | ||
7170 | set_bit(HNAE3_FLR_DONE, &hdev->flr_state); | |
7171 | } | |
7172 | ||
46a3df9f S |
7173 | static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev) |
7174 | { | |
7175 | struct pci_dev *pdev = ae_dev->pdev; | |
46a3df9f S |
7176 | struct hclge_dev *hdev; |
7177 | int ret; | |
7178 | ||
7179 | hdev = devm_kzalloc(&pdev->dev, sizeof(*hdev), GFP_KERNEL); | |
7180 | if (!hdev) { | |
7181 | ret = -ENOMEM; | |
e0027501 | 7182 | goto out; |
46a3df9f S |
7183 | } |
7184 | ||
46a3df9f S |
7185 | hdev->pdev = pdev; |
7186 | hdev->ae_dev = ae_dev; | |
4ed340ab | 7187 | hdev->reset_type = HNAE3_NONE_RESET; |
1a2f7bf2 | 7188 | hdev->reset_level = HNAE3_FUNC_RESET; |
46a3df9f | 7189 | ae_dev->priv = hdev; |
4ee09281 | 7190 | hdev->mps = ETH_FRAME_LEN + ETH_FCS_LEN + 2 * VLAN_HLEN; |
46a3df9f | 7191 | |
b2c04029 YL |
7192 | mutex_init(&hdev->vport_lock); |
7193 | ||
46a3df9f S |
7194 | ret = hclge_pci_init(hdev); |
7195 | if (ret) { | |
7196 | dev_err(&pdev->dev, "PCI init failed\n"); | |
e0027501 | 7197 | goto out; |
46a3df9f S |
7198 | } |
7199 | ||
3efb960f L |
7200 | /* Firmware command queue initialize */ |
7201 | ret = hclge_cmd_queue_init(hdev); | |
7202 | if (ret) { | |
7203 | dev_err(&pdev->dev, "Cmd queue init failed, ret = %d.\n", ret); | |
e0027501 | 7204 | goto err_pci_uninit; |
3efb960f L |
7205 | } |
7206 | ||
7207 | /* Firmware command initialize */ | |
46a3df9f S |
7208 | ret = hclge_cmd_init(hdev); |
7209 | if (ret) | |
e0027501 | 7210 | goto err_cmd_uninit; |
46a3df9f S |
7211 | |
7212 | ret = hclge_get_cap(hdev); | |
7213 | if (ret) { | |
e00e2197 CIK |
7214 | dev_err(&pdev->dev, "get hw capability error, ret = %d.\n", |
7215 | ret); | |
e0027501 | 7216 | goto err_cmd_uninit; |
46a3df9f S |
7217 | } |
7218 | ||
7219 | ret = hclge_configure(hdev); | |
7220 | if (ret) { | |
7221 | dev_err(&pdev->dev, "Configure dev error, ret = %d.\n", ret); | |
e0027501 | 7222 | goto err_cmd_uninit; |
46a3df9f S |
7223 | } |
7224 | ||
887c3820 | 7225 | ret = hclge_init_msi(hdev); |
46a3df9f | 7226 | if (ret) { |
887c3820 | 7227 | dev_err(&pdev->dev, "Init MSI/MSI-X error, ret = %d.\n", ret); |
e0027501 | 7228 | goto err_cmd_uninit; |
46a3df9f S |
7229 | } |
7230 | ||
466b0c00 L |
7231 | ret = hclge_misc_irq_init(hdev); |
7232 | if (ret) { | |
7233 | dev_err(&pdev->dev, | |
7234 | "Misc IRQ(vector0) init error, ret = %d.\n", | |
7235 | ret); | |
e0027501 | 7236 | goto err_msi_uninit; |
466b0c00 L |
7237 | } |
7238 | ||
46a3df9f S |
7239 | ret = hclge_alloc_tqps(hdev); |
7240 | if (ret) { | |
7241 | dev_err(&pdev->dev, "Allocate TQPs error, ret = %d.\n", ret); | |
e0027501 | 7242 | goto err_msi_irq_uninit; |
46a3df9f S |
7243 | } |
7244 | ||
7245 | ret = hclge_alloc_vport(hdev); | |
7246 | if (ret) { | |
7247 | dev_err(&pdev->dev, "Allocate vport error, ret = %d.\n", ret); | |
e0027501 | 7248 | goto err_msi_irq_uninit; |
46a3df9f S |
7249 | } |
7250 | ||
7df7dad6 L |
7251 | ret = hclge_map_tqp(hdev); |
7252 | if (ret) { | |
7253 | dev_err(&pdev->dev, "Map tqp error, ret = %d.\n", ret); | |
bc59f827 | 7254 | goto err_msi_irq_uninit; |
7df7dad6 L |
7255 | } |
7256 | ||
dea9a821 HT |
7257 | if (hdev->hw.mac.media_type == HNAE3_MEDIA_TYPE_COPPER) { |
7258 | ret = hclge_mac_mdio_config(hdev); | |
7259 | if (ret) { | |
7260 | dev_err(&hdev->pdev->dev, | |
7261 | "mdio config fail ret=%d\n", ret); | |
bc59f827 | 7262 | goto err_msi_irq_uninit; |
dea9a821 | 7263 | } |
cf9cca2d | 7264 | } |
7265 | ||
2da5ec58 JS |
7266 | ret = hclge_init_umv_space(hdev); |
7267 | if (ret) { | |
7268 | dev_err(&pdev->dev, "umv space init error, ret=%d.\n", ret); | |
7269 | goto err_msi_irq_uninit; | |
7270 | } | |
7271 | ||
46a3df9f S |
7272 | ret = hclge_mac_init(hdev); |
7273 | if (ret) { | |
7274 | dev_err(&pdev->dev, "Mac init error, ret = %d\n", ret); | |
e0027501 | 7275 | goto err_mdiobus_unreg; |
46a3df9f | 7276 | } |
46a3df9f S |
7277 | |
7278 | ret = hclge_config_tso(hdev, HCLGE_TSO_MSS_MIN, HCLGE_TSO_MSS_MAX); | |
7279 | if (ret) { | |
7280 | dev_err(&pdev->dev, "Enable tso fail, ret =%d\n", ret); | |
e0027501 | 7281 | goto err_mdiobus_unreg; |
46a3df9f S |
7282 | } |
7283 | ||
73f88b00 PL |
7284 | ret = hclge_config_gro(hdev, true); |
7285 | if (ret) | |
7286 | goto err_mdiobus_unreg; | |
7287 | ||
46a3df9f S |
7288 | ret = hclge_init_vlan_config(hdev); |
7289 | if (ret) { | |
7290 | dev_err(&pdev->dev, "VLAN init fail, ret =%d\n", ret); | |
e0027501 | 7291 | goto err_mdiobus_unreg; |
46a3df9f S |
7292 | } |
7293 | ||
7294 | ret = hclge_tm_schd_init(hdev); | |
7295 | if (ret) { | |
7296 | dev_err(&pdev->dev, "tm schd init fail, ret =%d\n", ret); | |
e0027501 | 7297 | goto err_mdiobus_unreg; |
68ece54e YL |
7298 | } |
7299 | ||
8015bb74 | 7300 | hclge_rss_init_cfg(hdev); |
68ece54e YL |
7301 | ret = hclge_rss_init_hw(hdev); |
7302 | if (ret) { | |
7303 | dev_err(&pdev->dev, "Rss init fail, ret =%d\n", ret); | |
e0027501 | 7304 | goto err_mdiobus_unreg; |
46a3df9f S |
7305 | } |
7306 | ||
635bfb58 FL |
7307 | ret = init_mgr_tbl(hdev); |
7308 | if (ret) { | |
7309 | dev_err(&pdev->dev, "manager table init fail, ret =%d\n", ret); | |
e0027501 | 7310 | goto err_mdiobus_unreg; |
635bfb58 FL |
7311 | } |
7312 | ||
10a954bc JS |
7313 | ret = hclge_init_fd_config(hdev); |
7314 | if (ret) { | |
7315 | dev_err(&pdev->dev, | |
7316 | "fd table init fail, ret=%d\n", ret); | |
7317 | goto err_mdiobus_unreg; | |
7318 | } | |
7319 | ||
9f53588e SJ |
7320 | ret = hclge_hw_error_set_state(hdev, true); |
7321 | if (ret) { | |
7322 | dev_err(&pdev->dev, | |
9ee5dbbb | 7323 | "fail(%d) to enable hw error interrupts\n", ret); |
9f53588e SJ |
7324 | goto err_mdiobus_unreg; |
7325 | } | |
7326 | ||
cacde272 YL |
7327 | hclge_dcb_ops_set(hdev); |
7328 | ||
d039ef68 | 7329 | timer_setup(&hdev->service_timer, hclge_service_timer, 0); |
1afdb53a | 7330 | timer_setup(&hdev->reset_timer, hclge_reset_timer, 0); |
46a3df9f | 7331 | INIT_WORK(&hdev->service_task, hclge_service_task); |
ed4a1bb8 | 7332 | INIT_WORK(&hdev->rst_service_task, hclge_reset_service_task); |
22fd3468 | 7333 | INIT_WORK(&hdev->mbx_service_task, hclge_mailbox_service_task); |
46a3df9f | 7334 | |
9ab4ad14 XW |
7335 | hclge_clear_all_event_cause(hdev); |
7336 | ||
466b0c00 L |
7337 | /* Enable MISC vector(vector0) */ |
7338 | hclge_enable_vector(&hdev->misc_vector, true); | |
7339 | ||
2ec3d9f0 | 7340 | hclge_state_init(hdev); |
1a2f7bf2 | 7341 | hdev->last_reset_time = jiffies; |
46a3df9f S |
7342 | |
7343 | pr_info("%s driver initialization finished.\n", HCLGE_DRIVER_NAME); | |
7344 | return 0; | |
7345 | ||
e0027501 HT |
7346 | err_mdiobus_unreg: |
7347 | if (hdev->hw.mac.phydev) | |
7348 | mdiobus_unregister(hdev->hw.mac.mdio_bus); | |
e0027501 HT |
7349 | err_msi_irq_uninit: |
7350 | hclge_misc_irq_uninit(hdev); | |
7351 | err_msi_uninit: | |
7352 | pci_free_irq_vectors(pdev); | |
7353 | err_cmd_uninit: | |
7354 | hclge_destroy_cmd_queue(&hdev->hw); | |
7355 | err_pci_uninit: | |
7d6d639b | 7356 | pcim_iounmap(pdev, hdev->hw.io_base); |
e0027501 | 7357 | pci_clear_master(pdev); |
46a3df9f | 7358 | pci_release_regions(pdev); |
e0027501 | 7359 | pci_disable_device(pdev); |
e0027501 | 7360 | out: |
46a3df9f S |
7361 | return ret; |
7362 | } | |
7363 | ||
c6dc5213 | 7364 | static void hclge_stats_clear(struct hclge_dev *hdev) |
7365 | { | |
7366 | memset(&hdev->hw_stats, 0, sizeof(hdev->hw_stats)); | |
7367 | } | |
7368 | ||
337460de YL |
7369 | static void hclge_reset_vport_state(struct hclge_dev *hdev) |
7370 | { | |
7371 | struct hclge_vport *vport = hdev->vport; | |
7372 | int i; | |
7373 | ||
7374 | for (i = 0; i < hdev->num_alloc_vport; i++) { | |
7375 | hclge_vport_start(vport); | |
7376 | vport++; | |
7377 | } | |
7378 | } | |
7379 | ||
4ed340ab L |
7380 | static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev) |
7381 | { | |
7382 | struct hclge_dev *hdev = ae_dev->priv; | |
7383 | struct pci_dev *pdev = ae_dev->pdev; | |
7384 | int ret; | |
7385 | ||
7386 | set_bit(HCLGE_STATE_DOWN, &hdev->state); | |
7387 | ||
c6dc5213 | 7388 | hclge_stats_clear(hdev); |
4e66632d | 7389 | memset(hdev->vlan_table, 0, sizeof(hdev->vlan_table)); |
c6dc5213 | 7390 | |
4ed340ab L |
7391 | ret = hclge_cmd_init(hdev); |
7392 | if (ret) { | |
7393 | dev_err(&pdev->dev, "Cmd queue init failed\n"); | |
7394 | return ret; | |
7395 | } | |
7396 | ||
7397 | ret = hclge_get_cap(hdev); | |
7398 | if (ret) { | |
7399 | dev_err(&pdev->dev, "get hw capability error, ret = %d.\n", | |
7400 | ret); | |
7401 | return ret; | |
7402 | } | |
7403 | ||
7404 | ret = hclge_configure(hdev); | |
7405 | if (ret) { | |
7406 | dev_err(&pdev->dev, "Configure dev error, ret = %d.\n", ret); | |
7407 | return ret; | |
7408 | } | |
7409 | ||
7410 | ret = hclge_map_tqp(hdev); | |
7411 | if (ret) { | |
7412 | dev_err(&pdev->dev, "Map tqp error, ret = %d.\n", ret); | |
7413 | return ret; | |
7414 | } | |
7415 | ||
2da5ec58 JS |
7416 | hclge_reset_umv_space(hdev); |
7417 | ||
4ed340ab L |
7418 | ret = hclge_mac_init(hdev); |
7419 | if (ret) { | |
7420 | dev_err(&pdev->dev, "Mac init error, ret = %d\n", ret); | |
7421 | return ret; | |
7422 | } | |
7423 | ||
4ed340ab L |
7424 | ret = hclge_config_tso(hdev, HCLGE_TSO_MSS_MIN, HCLGE_TSO_MSS_MAX); |
7425 | if (ret) { | |
7426 | dev_err(&pdev->dev, "Enable tso fail, ret =%d\n", ret); | |
7427 | return ret; | |
7428 | } | |
7429 | ||
73f88b00 PL |
7430 | ret = hclge_config_gro(hdev, true); |
7431 | if (ret) | |
7432 | return ret; | |
7433 | ||
4ed340ab L |
7434 | ret = hclge_init_vlan_config(hdev); |
7435 | if (ret) { | |
7436 | dev_err(&pdev->dev, "VLAN init fail, ret =%d\n", ret); | |
7437 | return ret; | |
7438 | } | |
7439 | ||
d85f1ab5 | 7440 | ret = hclge_tm_init_hw(hdev); |
4ed340ab | 7441 | if (ret) { |
d85f1ab5 | 7442 | dev_err(&pdev->dev, "tm init hw fail, ret =%d\n", ret); |
4ed340ab L |
7443 | return ret; |
7444 | } | |
7445 | ||
7446 | ret = hclge_rss_init_hw(hdev); | |
7447 | if (ret) { | |
7448 | dev_err(&pdev->dev, "Rss init fail, ret =%d\n", ret); | |
7449 | return ret; | |
7450 | } | |
7451 | ||
10a954bc JS |
7452 | ret = hclge_init_fd_config(hdev); |
7453 | if (ret) { | |
7454 | dev_err(&pdev->dev, | |
7455 | "fd table init fail, ret=%d\n", ret); | |
7456 | return ret; | |
7457 | } | |
7458 | ||
9ee5dbbb SJ |
7459 | /* Re-enable the hw error interrupts because |
7460 | * the interrupts get disabled on core/global reset. | |
78807a3d | 7461 | */ |
9ee5dbbb SJ |
7462 | ret = hclge_hw_error_set_state(hdev, true); |
7463 | if (ret) { | |
7464 | dev_err(&pdev->dev, | |
7465 | "fail(%d) to re-enable HNS hw error interrupts\n", ret); | |
7466 | return ret; | |
7467 | } | |
78807a3d | 7468 | |
337460de YL |
7469 | hclge_reset_vport_state(hdev); |
7470 | ||
4ed340ab L |
7471 | dev_info(&pdev->dev, "Reset done, %s driver initialization finished.\n", |
7472 | HCLGE_DRIVER_NAME); | |
7473 | ||
7474 | return 0; | |
7475 | } | |
7476 | ||
46a3df9f S |
7477 | static void hclge_uninit_ae_dev(struct hnae3_ae_dev *ae_dev) |
7478 | { | |
7479 | struct hclge_dev *hdev = ae_dev->priv; | |
7480 | struct hclge_mac *mac = &hdev->hw.mac; | |
7481 | ||
2ec3d9f0 | 7482 | hclge_state_uninit(hdev); |
46a3df9f S |
7483 | |
7484 | if (mac->phydev) | |
7485 | mdiobus_unregister(mac->mdio_bus); | |
7486 | ||
2da5ec58 JS |
7487 | hclge_uninit_umv_space(hdev); |
7488 | ||
466b0c00 L |
7489 | /* Disable MISC vector(vector0) */ |
7490 | hclge_enable_vector(&hdev->misc_vector, false); | |
9ab4ad14 XW |
7491 | synchronize_irq(hdev->misc_vector.vector_irq); |
7492 | ||
9f53588e | 7493 | hclge_hw_error_set_state(hdev, false); |
46a3df9f | 7494 | hclge_destroy_cmd_queue(&hdev->hw); |
202f2014 | 7495 | hclge_misc_irq_uninit(hdev); |
46a3df9f | 7496 | hclge_pci_uninit(hdev); |
b2c04029 | 7497 | mutex_destroy(&hdev->vport_lock); |
46a3df9f S |
7498 | ae_dev->priv = NULL; |
7499 | } | |
7500 | ||
4f645a90 PL |
7501 | static u32 hclge_get_max_channels(struct hnae3_handle *handle) |
7502 | { | |
7503 | struct hnae3_knic_private_info *kinfo = &handle->kinfo; | |
7504 | struct hclge_vport *vport = hclge_get_vport(handle); | |
7505 | struct hclge_dev *hdev = vport->back; | |
7506 | ||
7507 | return min_t(u32, hdev->rss_size_max * kinfo->num_tc, hdev->num_tqps); | |
7508 | } | |
7509 | ||
7510 | static void hclge_get_channels(struct hnae3_handle *handle, | |
7511 | struct ethtool_channels *ch) | |
7512 | { | |
7513 | struct hclge_vport *vport = hclge_get_vport(handle); | |
7514 | ||
7515 | ch->max_combined = hclge_get_max_channels(handle); | |
7516 | ch->other_count = 1; | |
7517 | ch->max_other = 1; | |
7518 | ch->combined_count = vport->alloc_tqps; | |
7519 | } | |
7520 | ||
f1f779ce | 7521 | static void hclge_get_tqps_and_rss_info(struct hnae3_handle *handle, |
08ca3d58 | 7522 | u16 *alloc_tqps, u16 *max_rss_size) |
f1f779ce PL |
7523 | { |
7524 | struct hclge_vport *vport = hclge_get_vport(handle); | |
7525 | struct hclge_dev *hdev = vport->back; | |
f1f779ce | 7526 | |
08ca3d58 | 7527 | *alloc_tqps = vport->alloc_tqps; |
f1f779ce PL |
7528 | *max_rss_size = hdev->rss_size_max; |
7529 | } | |
7530 | ||
7531 | static void hclge_release_tqp(struct hclge_vport *vport) | |
7532 | { | |
7533 | struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo; | |
7534 | struct hclge_dev *hdev = vport->back; | |
7535 | int i; | |
7536 | ||
7537 | for (i = 0; i < kinfo->num_tqps; i++) { | |
7538 | struct hclge_tqp *tqp = | |
7539 | container_of(kinfo->tqp[i], struct hclge_tqp, q); | |
7540 | ||
7541 | tqp->q.handle = NULL; | |
7542 | tqp->q.tqp_index = 0; | |
7543 | tqp->alloced = false; | |
7544 | } | |
7545 | ||
7546 | devm_kfree(&hdev->pdev->dev, kinfo->tqp); | |
7547 | kinfo->tqp = NULL; | |
7548 | } | |
7549 | ||
7550 | static int hclge_set_channels(struct hnae3_handle *handle, u32 new_tqps_num) | |
7551 | { | |
7552 | struct hclge_vport *vport = hclge_get_vport(handle); | |
7553 | struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo; | |
7554 | struct hclge_dev *hdev = vport->back; | |
7555 | int cur_rss_size = kinfo->rss_size; | |
7556 | int cur_tqps = kinfo->num_tqps; | |
7557 | u16 tc_offset[HCLGE_MAX_TC_NUM]; | |
7558 | u16 tc_valid[HCLGE_MAX_TC_NUM]; | |
7559 | u16 tc_size[HCLGE_MAX_TC_NUM]; | |
7560 | u16 roundup_size; | |
7561 | u32 *rss_indir; | |
7562 | int ret, i; | |
7563 | ||
f73c9107 | 7564 | /* Free old tqps, then reallocate with the new tqp number when setting up the nic */ | |
f1f779ce PL |
7565 | hclge_release_tqp(vport); |
7566 | ||
81356b1f | 7567 | ret = hclge_knic_setup(vport, new_tqps_num, kinfo->num_desc); |
f1f779ce PL |
7568 | if (ret) { |
7569 | dev_err(&hdev->pdev->dev, "setup nic fail, ret =%d\n", ret); | |
7570 | return ret; | |
7571 | } | |
7572 | ||
7573 | ret = hclge_map_tqp_to_vport(hdev, vport); | |
7574 | if (ret) { | |
7575 | dev_err(&hdev->pdev->dev, "map vport tqp fail, ret =%d\n", ret); | |
7576 | return ret; | |
7577 | } | |
7578 | ||
7579 | ret = hclge_tm_schd_init(hdev); | |
7580 | if (ret) { | |
7581 | dev_err(&hdev->pdev->dev, "tm schd init fail, ret =%d\n", ret); | |
7582 | return ret; | |
7583 | } | |
7584 | ||
7585 | roundup_size = roundup_pow_of_two(kinfo->rss_size); | |
7586 | roundup_size = ilog2(roundup_size); | |
7587 | /* Set the RSS TC mode according to the new RSS size */ | |
7588 | for (i = 0; i < HCLGE_MAX_TC_NUM; i++) { | |
7589 | tc_valid[i] = 0; | |
7590 | ||
7591 | if (!(hdev->hw_tc_map & BIT(i))) | |
7592 | continue; | |
7593 | ||
7594 | tc_valid[i] = 1; | |
7595 | tc_size[i] = roundup_size; | |
7596 | tc_offset[i] = kinfo->rss_size * i; | |
7597 | } | |
7598 | ret = hclge_set_rss_tc_mode(hdev, tc_valid, tc_size, tc_offset); | |
7599 | if (ret) | |
7600 | return ret; | |
7601 | ||
7602 | /* Reinitializes the rss indirect table according to the new RSS size */ | |
7603 | rss_indir = kcalloc(HCLGE_RSS_IND_TBL_SIZE, sizeof(u32), GFP_KERNEL); | |
7604 | if (!rss_indir) | |
7605 | return -ENOMEM; | |
7606 | ||
7607 | for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++) | |
7608 | rss_indir[i] = i % kinfo->rss_size; | |
7609 | ||
7610 | ret = hclge_set_rss(handle, rss_indir, NULL, 0); | |
7611 | if (ret) | |
7612 | dev_err(&hdev->pdev->dev, "set rss indir table fail, ret=%d\n", | |
7613 | ret); | |
7614 | ||
7615 | kfree(rss_indir); | |
7616 | ||
7617 | if (!ret) | |
7618 | dev_info(&hdev->pdev->dev, | |
7619 | "Channels changed, rss_size from %d to %d, tqps from %d to %d", | |
7620 | cur_rss_size, kinfo->rss_size, | |
7621 | cur_tqps, kinfo->rss_size * kinfo->num_tc); | |
7622 | ||
7623 | return ret; | |
7624 | } | |
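Changing the channel count rebuilds the TC queue windows and the RSS indirection table from the new rss_size; a small worked example of the arithmetic in the loop above, with an assumed rss_size:

/*
 * Illustrative only (assumed values): if the new kinfo->rss_size is
 * 10, then roundup_pow_of_two(10) = 16 and ilog2(16) = 4, so every
 * TC present in hw_tc_map gets tc_size[i] = 4 (a 16-queue window)
 * at tc_offset[i] = 10 * i, and the indirection table entries are
 * filled with i % 10.
 */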
7625 | ||
db2a3e43 FL |
7626 | static int hclge_get_regs_num(struct hclge_dev *hdev, u32 *regs_num_32_bit, |
7627 | u32 *regs_num_64_bit) | |
7628 | { | |
7629 | struct hclge_desc desc; | |
7630 | u32 total_num; | |
7631 | int ret; | |
7632 | ||
7633 | hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_REG_NUM, true); | |
7634 | ret = hclge_cmd_send(&hdev->hw, &desc, 1); | |
7635 | if (ret) { | |
7636 | dev_err(&hdev->pdev->dev, | |
7637 | "Query register number cmd failed, ret = %d.\n", ret); | |
7638 | return ret; | |
7639 | } | |
7640 | ||
7641 | *regs_num_32_bit = le32_to_cpu(desc.data[0]); | |
7642 | *regs_num_64_bit = le32_to_cpu(desc.data[1]); | |
7643 | ||
7644 | total_num = *regs_num_32_bit + *regs_num_64_bit; | |
7645 | if (!total_num) | |
7646 | return -EINVAL; | |
7647 | ||
7648 | return 0; | |
7649 | } | |
7650 | ||
7651 | static int hclge_get_32_bit_regs(struct hclge_dev *hdev, u32 regs_num, | |
7652 | void *data) | |
7653 | { | |
7654 | #define HCLGE_32_BIT_REG_RTN_DATANUM 8 | |
7655 | ||
7656 | struct hclge_desc *desc; | |
7657 | u32 *reg_val = data; | |
7658 | __le32 *desc_data; | |
7659 | int cmd_num; | |
7660 | int i, k, n; | |
7661 | int ret; | |
7662 | ||
7663 | if (regs_num == 0) | |
7664 | return 0; | |
7665 | ||
7666 | cmd_num = DIV_ROUND_UP(regs_num + 2, HCLGE_32_BIT_REG_RTN_DATANUM); | |
7667 | desc = kcalloc(cmd_num, sizeof(struct hclge_desc), GFP_KERNEL); | |
7668 | if (!desc) | |
7669 | return -ENOMEM; | |
7670 | ||
7671 | hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_32_BIT_REG, true); | |
7672 | ret = hclge_cmd_send(&hdev->hw, desc, cmd_num); | |
7673 | if (ret) { | |
7674 | dev_err(&hdev->pdev->dev, | |
7675 | "Query 32 bit register cmd failed, ret = %d.\n", ret); | |
7676 | kfree(desc); | |
7677 | return ret; | |
7678 | } | |
7679 | ||
7680 | for (i = 0; i < cmd_num; i++) { | |
7681 | if (i == 0) { | |
7682 | desc_data = (__le32 *)(&desc[i].data[0]); | |
7683 | n = HCLGE_32_BIT_REG_RTN_DATANUM - 2; | |
7684 | } else { | |
7685 | desc_data = (__le32 *)(&desc[i]); | |
7686 | n = HCLGE_32_BIT_REG_RTN_DATANUM; | |
7687 | } | |
7688 | for (k = 0; k < n; k++) { | |
7689 | *reg_val++ = le32_to_cpu(*desc_data++); | |
7690 | ||
7691 | regs_num--; | |
7692 | if (!regs_num) | |
7693 | break; | |
7694 | } | |
7695 | } | |
7696 | ||
7697 | kfree(desc); | |
7698 | return 0; | |
7699 | } | |
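The "+ 2" in the cmd_num calculation accounts for the first descriptor returning two fewer data words than the rest; the sketch below, with an assumed register count, shows how the values are spread across descriptors.

/*
 * Illustrative only (assumed count): each descriptor carries
 * HCLGE_32_BIT_REG_RTN_DATANUM (8) u32 words, but the first one
 * reserves 2 of them, so regs_num = 20 needs
 * DIV_ROUND_UP(20 + 2, 8) = 3 descriptors: 6 values come from
 * desc[0], 8 from desc[1] and the remaining 6 from desc[2].
 */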
7700 | ||
7701 | static int hclge_get_64_bit_regs(struct hclge_dev *hdev, u32 regs_num, | |
7702 | void *data) | |
7703 | { | |
7704 | #define HCLGE_64_BIT_REG_RTN_DATANUM 4 | |
7705 | ||
7706 | struct hclge_desc *desc; | |
7707 | u64 *reg_val = data; | |
7708 | __le64 *desc_data; | |
7709 | int cmd_num; | |
7710 | int i, k, n; | |
7711 | int ret; | |
7712 | ||
7713 | if (regs_num == 0) | |
7714 | return 0; | |
7715 | ||
7716 | cmd_num = DIV_ROUND_UP(regs_num + 1, HCLGE_64_BIT_REG_RTN_DATANUM); | |
7717 | desc = kcalloc(cmd_num, sizeof(struct hclge_desc), GFP_KERNEL); | |
7718 | if (!desc) | |
7719 | return -ENOMEM; | |
7720 | ||
7721 | hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_64_BIT_REG, true); | |
7722 | ret = hclge_cmd_send(&hdev->hw, desc, cmd_num); | |
7723 | if (ret) { | |
7724 | dev_err(&hdev->pdev->dev, | |
7725 | "Query 64 bit register cmd failed, ret = %d.\n", ret); | |
7726 | kfree(desc); | |
7727 | return ret; | |
7728 | } | |
7729 | ||
7730 | for (i = 0; i < cmd_num; i++) { | |
7731 | if (i == 0) { | |
7732 | desc_data = (__le64 *)(&desc[i].data[0]); | |
7733 | n = HCLGE_64_BIT_REG_RTN_DATANUM - 1; | |
7734 | } else { | |
7735 | desc_data = (__le64 *)(&desc[i]); | |
7736 | n = HCLGE_64_BIT_REG_RTN_DATANUM; | |
7737 | } | |
7738 | for (k = 0; k < n; k++) { | |
7739 | *reg_val++ = le64_to_cpu(*desc_data++); | |
7740 | ||
7741 | regs_num--; | |
7742 | if (!regs_num) | |
7743 | break; | |
7744 | } | |
7745 | } | |
7746 | ||
7747 | kfree(desc); | |
7748 | return 0; | |
7749 | } | |
7750 | ||
a1018e31 JS |
7751 | #define MAX_SEPARATE_NUM 4 |
7752 | #define SEPARATOR_VALUE 0xFFFFFFFF | |
7753 | #define REG_NUM_PER_LINE 4 | |
7754 | #define REG_LEN_PER_LINE (REG_NUM_PER_LINE * sizeof(u32)) | |
7755 | ||
db2a3e43 FL |
7756 | static int hclge_get_regs_len(struct hnae3_handle *handle) |
7757 | { | |
a1018e31 JS |
7758 | int cmdq_lines, common_lines, ring_lines, tqp_intr_lines; |
7759 | struct hnae3_knic_private_info *kinfo = &handle->kinfo; | |
db2a3e43 FL |
7760 | struct hclge_vport *vport = hclge_get_vport(handle); |
7761 | struct hclge_dev *hdev = vport->back; | |
7762 | u32 regs_num_32_bit, regs_num_64_bit; | |
7763 | int ret; | |
7764 | ||
7765 | ret = hclge_get_regs_num(hdev, ®s_num_32_bit, ®s_num_64_bit); | |
7766 | if (ret) { | |
7767 | dev_err(&hdev->pdev->dev, | |
7768 | "Get register number failed, ret = %d.\n", ret); | |
7769 | return -EOPNOTSUPP; | |
7770 | } | |
7771 | ||
a1018e31 JS |
7772 | cmdq_lines = sizeof(cmdq_reg_addr_list) / REG_LEN_PER_LINE + 1; |
7773 | common_lines = sizeof(common_reg_addr_list) / REG_LEN_PER_LINE + 1; | |
7774 | ring_lines = sizeof(ring_reg_addr_list) / REG_LEN_PER_LINE + 1; | |
7775 | tqp_intr_lines = sizeof(tqp_intr_reg_addr_list) / REG_LEN_PER_LINE + 1; | |
7776 | ||
7777 | return (cmdq_lines + common_lines + ring_lines * kinfo->num_tqps + | |
7778 | tqp_intr_lines * (hdev->num_msi_used - 1)) * REG_LEN_PER_LINE + | |
7779 | regs_num_32_bit * sizeof(u32) + regs_num_64_bit * sizeof(u64); | |
db2a3e43 FL |
7780 | } |
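Each register group is rounded up to whole dump lines by the "+ 1" above, the ring and interrupt groups are repeated per TQP and per non-misc vector, and the firmware-reported 32/64-bit register counts are added on top; a brief arithmetic sketch:

/*
 * Illustrative only: a group of n 32-bit registers occupies
 * n * 4 / REG_LEN_PER_LINE + 1 lines of REG_LEN_PER_LINE (16) bytes
 * each, e.g. the 14 cmdq registers take 14 * 4 / 16 + 1 = 4 lines
 * (64 bytes) in the dump.
 */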
7781 | ||
7782 | static void hclge_get_regs(struct hnae3_handle *handle, u32 *version, | |
7783 | void *data) | |
7784 | { | |
a1018e31 | 7785 | struct hnae3_knic_private_info *kinfo = &handle->kinfo; |
db2a3e43 FL |
7786 | struct hclge_vport *vport = hclge_get_vport(handle); |
7787 | struct hclge_dev *hdev = vport->back; | |
7788 | u32 regs_num_32_bit, regs_num_64_bit; | |
a1018e31 JS |
7789 | int i, j, reg_um, separator_num; |
7790 | u32 *reg = data; | |
db2a3e43 FL |
7791 | int ret; |
7792 | ||
7793 | *version = hdev->fw_version; | |
7794 | ||
7795 | ret = hclge_get_regs_num(hdev, ®s_num_32_bit, ®s_num_64_bit); | |
7796 | if (ret) { | |
7797 | dev_err(&hdev->pdev->dev, | |
7798 | "Get register number failed, ret = %d.\n", ret); | |
7799 | return; | |
7800 | } | |
7801 | ||
a1018e31 JS |
7802 | /* fetching per-PF registers' values from PF PCIe register space */ | |
7803 | reg_um = sizeof(cmdq_reg_addr_list) / sizeof(u32); | |
7804 | separator_num = MAX_SEPARATE_NUM - reg_um % REG_NUM_PER_LINE; | |
7805 | for (i = 0; i < reg_um; i++) | |
7806 | *reg++ = hclge_read_dev(&hdev->hw, cmdq_reg_addr_list[i]); | |
7807 | for (i = 0; i < separator_num; i++) | |
7808 | *reg++ = SEPARATOR_VALUE; | |
7809 | ||
7810 | reg_um = sizeof(common_reg_addr_list) / sizeof(u32); | |
7811 | separator_num = MAX_SEPARATE_NUM - reg_um % REG_NUM_PER_LINE; | |
7812 | for (i = 0; i < reg_um; i++) | |
7813 | *reg++ = hclge_read_dev(&hdev->hw, common_reg_addr_list[i]); | |
7814 | for (i = 0; i < separator_num; i++) | |
7815 | *reg++ = SEPARATOR_VALUE; | |
7816 | ||
7817 | reg_um = sizeof(ring_reg_addr_list) / sizeof(u32); | |
7818 | separator_num = MAX_SEPARATE_NUM - reg_um % REG_NUM_PER_LINE; | |
7819 | for (j = 0; j < kinfo->num_tqps; j++) { | |
7820 | for (i = 0; i < reg_um; i++) | |
7821 | *reg++ = hclge_read_dev(&hdev->hw, | |
7822 | ring_reg_addr_list[i] + | |
7823 | 0x200 * j); | |
7824 | for (i = 0; i < separator_num; i++) | |
7825 | *reg++ = SEPARATOR_VALUE; | |
7826 | } | |
7827 | ||
7828 | reg_um = sizeof(tqp_intr_reg_addr_list) / sizeof(u32); | |
7829 | separator_num = MAX_SEPARATE_NUM - reg_um % REG_NUM_PER_LINE; | |
7830 | for (j = 0; j < hdev->num_msi_used - 1; j++) { | |
7831 | for (i = 0; i < reg_um; i++) | |
7832 | *reg++ = hclge_read_dev(&hdev->hw, | |
7833 | tqp_intr_reg_addr_list[i] + | |
7834 | 4 * j); | |
7835 | for (i = 0; i < separator_num; i++) | |
7836 | *reg++ = SEPARATOR_VALUE; | |
7837 | } | |
7838 | ||
7839 | /* fetching PF common registers' values from firmware */ | |
7840 | ret = hclge_get_32_bit_regs(hdev, regs_num_32_bit, reg); | |
db2a3e43 FL |
7841 | if (ret) { |
7842 | dev_err(&hdev->pdev->dev, | |
7843 | "Get 32 bit register failed, ret = %d.\n", ret); | |
7844 | return; | |
7845 | } | |
7846 | ||
a1018e31 JS |
7847 | reg += regs_num_32_bit; |
7848 | ret = hclge_get_64_bit_regs(hdev, regs_num_64_bit, reg); | |
db2a3e43 FL |
7849 | if (ret) |
7850 | dev_err(&hdev->pdev->dev, | |
7851 | "Get 64 bit register failed, ret = %d.\n", ret); | |
7852 | } | |
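Every group in the dump is padded with SEPARATOR_VALUE words up to a REG_LEN_PER_LINE boundary, which keeps the total size consistent with hclge_get_regs_len(); for instance, with the same assumed 14-entry group as above:

/*
 * Illustrative only: for a 14-entry group, 14 % 4 = 2, so
 * separator_num = 4 - 2 = 2 and the group emits 14 register values
 * plus two 0xFFFFFFFF markers, i.e. exactly the 4 lines (64 bytes)
 * accounted for in hclge_get_regs_len().
 */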
7853 | ||
fe3a3e15 | 7854 | static int hclge_set_led_status(struct hclge_dev *hdev, u8 locate_led_status) |
d9a0884e JS |
7855 | { |
7856 | struct hclge_set_led_state_cmd *req; | |
7857 | struct hclge_desc desc; | |
7858 | int ret; | |
7859 | ||
7860 | hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_LED_STATUS_CFG, false); | |
7861 | ||
7862 | req = (struct hclge_set_led_state_cmd *)desc.data; | |
ccc23ef3 PL |
7863 | hnae3_set_field(req->locate_led_config, HCLGE_LED_LOCATE_STATE_M, |
7864 | HCLGE_LED_LOCATE_STATE_S, locate_led_status); | |
d9a0884e JS |
7865 | |
7866 | ret = hclge_cmd_send(&hdev->hw, &desc, 1); | |
7867 | if (ret) | |
7868 | dev_err(&hdev->pdev->dev, | |
7869 | "Send set led state cmd error, ret =%d\n", ret); | |
7870 | ||
7871 | return ret; | |
7872 | } | |
7873 | ||
7874 | enum hclge_led_status { | |
7875 | HCLGE_LED_OFF, | |
7876 | HCLGE_LED_ON, | |
7877 | HCLGE_LED_NO_CHANGE = 0xFF, | |
7878 | }; | |
7879 | ||
7880 | static int hclge_set_led_id(struct hnae3_handle *handle, | |
7881 | enum ethtool_phys_id_state status) | |
7882 | { | |
d9a0884e JS |
7883 | struct hclge_vport *vport = hclge_get_vport(handle); |
7884 | struct hclge_dev *hdev = vport->back; | |
d9a0884e JS |
7885 | |
7886 | switch (status) { | |
7887 | case ETHTOOL_ID_ACTIVE: | |
fe3a3e15 | 7888 | return hclge_set_led_status(hdev, HCLGE_LED_ON); |
d9a0884e | 7889 | case ETHTOOL_ID_INACTIVE: |
fe3a3e15 | 7890 | return hclge_set_led_status(hdev, HCLGE_LED_OFF); |
d9a0884e | 7891 | default: |
fe3a3e15 | 7892 | return -EINVAL; |
d9a0884e | 7893 | } |
d9a0884e JS |
7894 | } |
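This hook is reached through ethtool's port-identification path; the note below describes the expected call sequence, with the interface name in the example being an assumption rather than anything fixed by the driver.

/*
 * Illustrative only: a user-space request such as "ethtool -p eth0"
 * (interface name assumed) makes the ethtool core call this hook
 * with ETHTOOL_ID_ACTIVE when identification starts and
 * ETHTOOL_ID_INACTIVE when it ends; returning 0 for ACTIVE tells
 * the core that the device keeps the locate LED asserted on its own
 * in the meantime.
 */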
7895 | ||
d92ceae9 FL |
7896 | static void hclge_get_link_mode(struct hnae3_handle *handle, |
7897 | unsigned long *supported, | |
7898 | unsigned long *advertising) | |
7899 | { | |
7900 | unsigned int size = BITS_TO_LONGS(__ETHTOOL_LINK_MODE_MASK_NBITS); | |
7901 | struct hclge_vport *vport = hclge_get_vport(handle); | |
7902 | struct hclge_dev *hdev = vport->back; | |
7903 | unsigned int idx = 0; | |
7904 | ||
7905 | for (; idx < size; idx++) { | |
7906 | supported[idx] = hdev->hw.mac.supported[idx]; | |
7907 | advertising[idx] = hdev->hw.mac.advertising[idx]; | |
7908 | } | |
7909 | } | |
7910 | ||
46a3df9f S |
7911 | static const struct hnae3_ae_ops hclge_ops = { |
7912 | .init_ae_dev = hclge_init_ae_dev, | |
7913 | .uninit_ae_dev = hclge_uninit_ae_dev, | |
26977990 HT |
7914 | .flr_prepare = hclge_flr_prepare, |
7915 | .flr_done = hclge_flr_done, | |
46a3df9f S |
7916 | .init_client_instance = hclge_init_client_instance, |
7917 | .uninit_client_instance = hclge_uninit_client_instance, | |
63d7e66f SM |
7918 | .map_ring_to_vector = hclge_map_ring_to_vector, |
7919 | .unmap_ring_from_vector = hclge_unmap_ring_frm_vector, | |
46a3df9f | 7920 | .get_vector = hclge_get_vector, |
7412200c | 7921 | .put_vector = hclge_put_vector, |
46a3df9f | 7922 | .set_promisc_mode = hclge_set_promisc_mode, |
c39c4d98 | 7923 | .set_loopback = hclge_set_loopback, |
46a3df9f S |
7924 | .start = hclge_ae_start, |
7925 | .stop = hclge_ae_stop, | |
337460de YL |
7926 | .client_start = hclge_client_start, |
7927 | .client_stop = hclge_client_stop, | |
46a3df9f S |
7928 | .get_status = hclge_get_status, |
7929 | .get_ksettings_an_result = hclge_get_ksettings_an_result, | |
7930 | .update_speed_duplex_h = hclge_update_speed_duplex_h, | |
7931 | .cfg_mac_speed_dup_h = hclge_cfg_mac_speed_dup_h, | |
7932 | .get_media_type = hclge_get_media_type, | |
7933 | .get_rss_key_size = hclge_get_rss_key_size, | |
7934 | .get_rss_indir_size = hclge_get_rss_indir_size, | |
7935 | .get_rss = hclge_get_rss, | |
7936 | .set_rss = hclge_set_rss, | |
f7db940a | 7937 | .set_rss_tuple = hclge_set_rss_tuple, |
07d29954 | 7938 | .get_rss_tuple = hclge_get_rss_tuple, |
46a3df9f S |
7939 | .get_tc_size = hclge_get_tc_size, |
7940 | .get_mac_addr = hclge_get_mac_addr, | |
7941 | .set_mac_addr = hclge_set_mac_addr, | |
a185d723 | 7942 | .do_ioctl = hclge_do_ioctl, |
46a3df9f S |
7943 | .add_uc_addr = hclge_add_uc_addr, |
7944 | .rm_uc_addr = hclge_rm_uc_addr, | |
7945 | .add_mc_addr = hclge_add_mc_addr, | |
7946 | .rm_mc_addr = hclge_rm_mc_addr, | |
7947 | .set_autoneg = hclge_set_autoneg, | |
7948 | .get_autoneg = hclge_get_autoneg, | |
7949 | .get_pauseparam = hclge_get_pauseparam, | |
09ea401e | 7950 | .set_pauseparam = hclge_set_pauseparam, |
46a3df9f S |
7951 | .set_mtu = hclge_set_mtu, |
7952 | .reset_queue = hclge_reset_tqp, | |
7953 | .get_stats = hclge_get_stats, | |
7954 | .update_stats = hclge_update_stats, | |
7955 | .get_strings = hclge_get_strings, | |
7956 | .get_sset_count = hclge_get_sset_count, | |
7957 | .get_fw_version = hclge_get_fw_version, | |
7958 | .get_mdix_mode = hclge_get_mdix_mode, | |
d818396d | 7959 | .enable_vlan_filter = hclge_enable_vlan_filter, |
4e66632d | 7960 | .set_vlan_filter = hclge_set_vlan_filter, |
46a3df9f | 7961 | .set_vf_vlan_filter = hclge_set_vf_vlan_filter, |
5f9a7732 | 7962 | .enable_hw_strip_rxvtag = hclge_en_hw_strip_rxvtag, |
4ed340ab | 7963 | .reset_event = hclge_reset_event, |
2c883d73 | 7964 | .set_default_reset_request = hclge_set_def_reset_request, |
f1f779ce PL |
7965 | .get_tqps_and_rss_info = hclge_get_tqps_and_rss_info, |
7966 | .set_channels = hclge_set_channels, | |
4f645a90 | 7967 | .get_channels = hclge_get_channels, |
db2a3e43 FL |
7968 | .get_regs_len = hclge_get_regs_len, |
7969 | .get_regs = hclge_get_regs, | |
d9a0884e | 7970 | .set_led_id = hclge_set_led_id, |
d92ceae9 | 7971 | .get_link_mode = hclge_get_link_mode, |
3ca8e27c JS |
7972 | .add_fd_entry = hclge_add_fd_entry, |
7973 | .del_fd_entry = hclge_del_fd_entry, | |
7ce98982 | 7974 | .del_all_fd_entries = hclge_del_all_fd_entries, |
295043a7 JS |
7975 | .get_fd_rule_cnt = hclge_get_fd_rule_cnt, |
7976 | .get_fd_rule_info = hclge_get_fd_rule_info, | |
7977 | .get_fd_all_rules = hclge_get_all_rules, | |
7ce98982 | 7978 | .restore_fd_rules = hclge_restore_fd_entries, |
d1f04a80 | 7979 | .enable_fd = hclge_enable_fd, |
bf4fd28d | 7980 | .dbg_run_cmd = hclge_dbg_run_cmd, |
af72a21f | 7981 | .handle_hw_ras_error = hclge_handle_hw_ras_error, |
225c02eb HT |
7982 | .get_hw_reset_stat = hclge_get_hw_reset_stat, |
7983 | .ae_dev_resetting = hclge_ae_dev_resetting, | |
7984 | .ae_dev_reset_cnt = hclge_ae_dev_reset_cnt, | |
46a3df9f S |
7985 | }; |
7986 | ||
7987 | static struct hnae3_ae_algo ae_algo = { | |
7988 | .ops = &hclge_ops, | |
46a3df9f S |
7989 | .pdev_id_table = ae_algo_pci_tbl, |
7990 | }; | |
7991 | ||
7992 | static int hclge_init(void) | |
7993 | { | |
7994 | pr_info("%s is initializing\n", HCLGE_NAME); | |
7995 | ||
a4d090cc FL |
7996 | hnae3_register_ae_algo(&ae_algo); |
7997 | ||
7998 | return 0; | |
46a3df9f S |
7999 | } |
8000 | ||
8001 | static void hclge_exit(void) | |
8002 | { | |
8003 | hnae3_unregister_ae_algo(&ae_algo); | |
8004 | } | |
8005 | module_init(hclge_init); | |
8006 | module_exit(hclge_exit); | |
8007 | ||
8008 | MODULE_LICENSE("GPL"); | |
8009 | MODULE_AUTHOR("Huawei Tech. Co., Ltd."); | |
8010 | MODULE_DESCRIPTION("HCLGE Driver"); | |
8011 | MODULE_VERSION(HCLGE_MOD_VERSION); |