]>
Commit | Line | Data |
---|---|---|
ef57c40f JS |
1 | // SPDX-License-Identifier: GPL-2.0+ |
2 | // Copyright (c) 2016-2017 Hisilicon Limited. | |
46a3df9f S |
3 | |
4 | #include <linux/acpi.h> | |
5 | #include <linux/device.h> | |
6 | #include <linux/etherdevice.h> | |
7 | #include <linux/init.h> | |
8 | #include <linux/interrupt.h> | |
9 | #include <linux/kernel.h> | |
10 | #include <linux/module.h> | |
11 | #include <linux/netdevice.h> | |
12 | #include <linux/pci.h> | |
13 | #include <linux/platform_device.h> | |
7393ed39 | 14 | #include <linux/if_vlan.h> |
d5752031 | 15 | #include <net/rtnetlink.h> |
46a3df9f | 16 | #include "hclge_cmd.h" |
cacde272 | 17 | #include "hclge_dcb.h" |
46a3df9f | 18 | #include "hclge_main.h" |
0cdbdd3e | 19 | #include "hclge_mbx.h" |
46a3df9f S |
20 | #include "hclge_mdio.h" |
21 | #include "hclge_tm.h" | |
00bb612a | 22 | #include "hclge_err.h" |
46a3df9f S |
23 | #include "hnae3.h" |
24 | ||
25 | #define HCLGE_NAME "hclge" | |
26 | #define HCLGE_STATS_READ(p, offset) (*((u64 *)((u8 *)(p) + (offset)))) | |
27 | #define HCLGE_MAC_STATS_FIELD_OFF(f) (offsetof(struct hclge_mac_stats, f)) | |
46a3df9f | 28 | |
4ee09281 | 29 | static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mps); |
46a3df9f | 30 | static int hclge_init_vlan_config(struct hclge_dev *hdev); |
4ed340ab | 31 | static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev); |
2da5ec58 JS |
32 | static int hclge_set_umv_space(struct hclge_dev *hdev, u16 space_size, |
33 | u16 *allocated_size, bool is_alloc); | |
46a3df9f S |
34 | |
35 | static struct hnae3_ae_algo ae_algo; | |
36 | ||
37 | static const struct pci_device_id ae_algo_pci_tbl[] = { | |
38 | {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_GE), 0}, | |
39 | {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE), 0}, | |
40 | {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA), 0}, | |
41 | {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA_MACSEC), 0}, | |
42 | {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA), 0}, | |
43 | {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA_MACSEC), 0}, | |
44 | {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_MACSEC), 0}, | |
e92a0843 | 45 | /* required last entry */ |
46a3df9f S |
46 | {0, } |
47 | }; | |
48 | ||
28d9cec8 YL |
49 | MODULE_DEVICE_TABLE(pci, ae_algo_pci_tbl); |
50 | ||
46a3df9f | 51 | static const char hns3_nic_test_strs[][ETH_GSTRING_LEN] = { |
67b8c316 | 52 | "App Loopback test", |
86957272 FL |
53 | "Serdes serial Loopback test", |
54 | "Serdes parallel Loopback test", | |
46a3df9f S |
55 | "Phy Loopback test" |
56 | }; | |
57 | ||
46a3df9f S |
58 | static const struct hclge_comm_stats_str g_mac_stats_string[] = { |
59 | {"mac_tx_mac_pause_num", | |
60 | HCLGE_MAC_STATS_FIELD_OFF(mac_tx_mac_pause_num)}, | |
61 | {"mac_rx_mac_pause_num", | |
62 | HCLGE_MAC_STATS_FIELD_OFF(mac_rx_mac_pause_num)}, | |
63 | {"mac_tx_pfc_pri0_pkt_num", | |
64 | HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri0_pkt_num)}, | |
65 | {"mac_tx_pfc_pri1_pkt_num", | |
66 | HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri1_pkt_num)}, | |
67 | {"mac_tx_pfc_pri2_pkt_num", | |
68 | HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri2_pkt_num)}, | |
69 | {"mac_tx_pfc_pri3_pkt_num", | |
70 | HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri3_pkt_num)}, | |
71 | {"mac_tx_pfc_pri4_pkt_num", | |
72 | HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri4_pkt_num)}, | |
73 | {"mac_tx_pfc_pri5_pkt_num", | |
74 | HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri5_pkt_num)}, | |
75 | {"mac_tx_pfc_pri6_pkt_num", | |
76 | HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri6_pkt_num)}, | |
77 | {"mac_tx_pfc_pri7_pkt_num", | |
78 | HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri7_pkt_num)}, | |
79 | {"mac_rx_pfc_pri0_pkt_num", | |
80 | HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri0_pkt_num)}, | |
81 | {"mac_rx_pfc_pri1_pkt_num", | |
82 | HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri1_pkt_num)}, | |
83 | {"mac_rx_pfc_pri2_pkt_num", | |
84 | HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri2_pkt_num)}, | |
85 | {"mac_rx_pfc_pri3_pkt_num", | |
86 | HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri3_pkt_num)}, | |
87 | {"mac_rx_pfc_pri4_pkt_num", | |
88 | HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri4_pkt_num)}, | |
89 | {"mac_rx_pfc_pri5_pkt_num", | |
90 | HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri5_pkt_num)}, | |
91 | {"mac_rx_pfc_pri6_pkt_num", | |
92 | HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri6_pkt_num)}, | |
93 | {"mac_rx_pfc_pri7_pkt_num", | |
94 | HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri7_pkt_num)}, | |
95 | {"mac_tx_total_pkt_num", | |
96 | HCLGE_MAC_STATS_FIELD_OFF(mac_tx_total_pkt_num)}, | |
97 | {"mac_tx_total_oct_num", | |
98 | HCLGE_MAC_STATS_FIELD_OFF(mac_tx_total_oct_num)}, | |
99 | {"mac_tx_good_pkt_num", | |
100 | HCLGE_MAC_STATS_FIELD_OFF(mac_tx_good_pkt_num)}, | |
101 | {"mac_tx_bad_pkt_num", | |
102 | HCLGE_MAC_STATS_FIELD_OFF(mac_tx_bad_pkt_num)}, | |
103 | {"mac_tx_good_oct_num", | |
104 | HCLGE_MAC_STATS_FIELD_OFF(mac_tx_good_oct_num)}, | |
105 | {"mac_tx_bad_oct_num", | |
106 | HCLGE_MAC_STATS_FIELD_OFF(mac_tx_bad_oct_num)}, | |
107 | {"mac_tx_uni_pkt_num", | |
108 | HCLGE_MAC_STATS_FIELD_OFF(mac_tx_uni_pkt_num)}, | |
109 | {"mac_tx_multi_pkt_num", | |
110 | HCLGE_MAC_STATS_FIELD_OFF(mac_tx_multi_pkt_num)}, | |
111 | {"mac_tx_broad_pkt_num", | |
112 | HCLGE_MAC_STATS_FIELD_OFF(mac_tx_broad_pkt_num)}, | |
113 | {"mac_tx_undersize_pkt_num", | |
114 | HCLGE_MAC_STATS_FIELD_OFF(mac_tx_undersize_pkt_num)}, | |
f3426583 JS |
115 | {"mac_tx_oversize_pkt_num", |
116 | HCLGE_MAC_STATS_FIELD_OFF(mac_tx_oversize_pkt_num)}, | |
46a3df9f S |
117 | {"mac_tx_64_oct_pkt_num", |
118 | HCLGE_MAC_STATS_FIELD_OFF(mac_tx_64_oct_pkt_num)}, | |
119 | {"mac_tx_65_127_oct_pkt_num", | |
120 | HCLGE_MAC_STATS_FIELD_OFF(mac_tx_65_127_oct_pkt_num)}, | |
121 | {"mac_tx_128_255_oct_pkt_num", | |
122 | HCLGE_MAC_STATS_FIELD_OFF(mac_tx_128_255_oct_pkt_num)}, | |
123 | {"mac_tx_256_511_oct_pkt_num", | |
124 | HCLGE_MAC_STATS_FIELD_OFF(mac_tx_256_511_oct_pkt_num)}, | |
125 | {"mac_tx_512_1023_oct_pkt_num", | |
126 | HCLGE_MAC_STATS_FIELD_OFF(mac_tx_512_1023_oct_pkt_num)}, | |
127 | {"mac_tx_1024_1518_oct_pkt_num", | |
128 | HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1024_1518_oct_pkt_num)}, | |
b42874e4 JS |
129 | {"mac_tx_1519_2047_oct_pkt_num", |
130 | HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_2047_oct_pkt_num)}, | |
131 | {"mac_tx_2048_4095_oct_pkt_num", | |
132 | HCLGE_MAC_STATS_FIELD_OFF(mac_tx_2048_4095_oct_pkt_num)}, | |
133 | {"mac_tx_4096_8191_oct_pkt_num", | |
134 | HCLGE_MAC_STATS_FIELD_OFF(mac_tx_4096_8191_oct_pkt_num)}, | |
b42874e4 JS |
135 | {"mac_tx_8192_9216_oct_pkt_num", |
136 | HCLGE_MAC_STATS_FIELD_OFF(mac_tx_8192_9216_oct_pkt_num)}, | |
137 | {"mac_tx_9217_12287_oct_pkt_num", | |
138 | HCLGE_MAC_STATS_FIELD_OFF(mac_tx_9217_12287_oct_pkt_num)}, | |
139 | {"mac_tx_12288_16383_oct_pkt_num", | |
140 | HCLGE_MAC_STATS_FIELD_OFF(mac_tx_12288_16383_oct_pkt_num)}, | |
141 | {"mac_tx_1519_max_good_pkt_num", | |
142 | HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_max_good_oct_pkt_num)}, | |
143 | {"mac_tx_1519_max_bad_pkt_num", | |
144 | HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_max_bad_oct_pkt_num)}, | |
46a3df9f S |
145 | {"mac_rx_total_pkt_num", |
146 | HCLGE_MAC_STATS_FIELD_OFF(mac_rx_total_pkt_num)}, | |
147 | {"mac_rx_total_oct_num", | |
148 | HCLGE_MAC_STATS_FIELD_OFF(mac_rx_total_oct_num)}, | |
149 | {"mac_rx_good_pkt_num", | |
150 | HCLGE_MAC_STATS_FIELD_OFF(mac_rx_good_pkt_num)}, | |
151 | {"mac_rx_bad_pkt_num", | |
152 | HCLGE_MAC_STATS_FIELD_OFF(mac_rx_bad_pkt_num)}, | |
153 | {"mac_rx_good_oct_num", | |
154 | HCLGE_MAC_STATS_FIELD_OFF(mac_rx_good_oct_num)}, | |
155 | {"mac_rx_bad_oct_num", | |
156 | HCLGE_MAC_STATS_FIELD_OFF(mac_rx_bad_oct_num)}, | |
157 | {"mac_rx_uni_pkt_num", | |
158 | HCLGE_MAC_STATS_FIELD_OFF(mac_rx_uni_pkt_num)}, | |
159 | {"mac_rx_multi_pkt_num", | |
160 | HCLGE_MAC_STATS_FIELD_OFF(mac_rx_multi_pkt_num)}, | |
161 | {"mac_rx_broad_pkt_num", | |
162 | HCLGE_MAC_STATS_FIELD_OFF(mac_rx_broad_pkt_num)}, | |
163 | {"mac_rx_undersize_pkt_num", | |
164 | HCLGE_MAC_STATS_FIELD_OFF(mac_rx_undersize_pkt_num)}, | |
f3426583 JS |
165 | {"mac_rx_oversize_pkt_num", |
166 | HCLGE_MAC_STATS_FIELD_OFF(mac_rx_oversize_pkt_num)}, | |
46a3df9f S |
167 | {"mac_rx_64_oct_pkt_num", |
168 | HCLGE_MAC_STATS_FIELD_OFF(mac_rx_64_oct_pkt_num)}, | |
169 | {"mac_rx_65_127_oct_pkt_num", | |
170 | HCLGE_MAC_STATS_FIELD_OFF(mac_rx_65_127_oct_pkt_num)}, | |
171 | {"mac_rx_128_255_oct_pkt_num", | |
172 | HCLGE_MAC_STATS_FIELD_OFF(mac_rx_128_255_oct_pkt_num)}, | |
173 | {"mac_rx_256_511_oct_pkt_num", | |
174 | HCLGE_MAC_STATS_FIELD_OFF(mac_rx_256_511_oct_pkt_num)}, | |
175 | {"mac_rx_512_1023_oct_pkt_num", | |
176 | HCLGE_MAC_STATS_FIELD_OFF(mac_rx_512_1023_oct_pkt_num)}, | |
177 | {"mac_rx_1024_1518_oct_pkt_num", | |
178 | HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1024_1518_oct_pkt_num)}, | |
b42874e4 JS |
179 | {"mac_rx_1519_2047_oct_pkt_num", |
180 | HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_2047_oct_pkt_num)}, | |
181 | {"mac_rx_2048_4095_oct_pkt_num", | |
182 | HCLGE_MAC_STATS_FIELD_OFF(mac_rx_2048_4095_oct_pkt_num)}, | |
183 | {"mac_rx_4096_8191_oct_pkt_num", | |
184 | HCLGE_MAC_STATS_FIELD_OFF(mac_rx_4096_8191_oct_pkt_num)}, | |
b42874e4 JS |
185 | {"mac_rx_8192_9216_oct_pkt_num", |
186 | HCLGE_MAC_STATS_FIELD_OFF(mac_rx_8192_9216_oct_pkt_num)}, | |
187 | {"mac_rx_9217_12287_oct_pkt_num", | |
188 | HCLGE_MAC_STATS_FIELD_OFF(mac_rx_9217_12287_oct_pkt_num)}, | |
189 | {"mac_rx_12288_16383_oct_pkt_num", | |
190 | HCLGE_MAC_STATS_FIELD_OFF(mac_rx_12288_16383_oct_pkt_num)}, | |
191 | {"mac_rx_1519_max_good_pkt_num", | |
192 | HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_max_good_oct_pkt_num)}, | |
193 | {"mac_rx_1519_max_bad_pkt_num", | |
194 | HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_max_bad_oct_pkt_num)}, | |
46a3df9f | 195 | |
c36317be JS |
196 | {"mac_tx_fragment_pkt_num", |
197 | HCLGE_MAC_STATS_FIELD_OFF(mac_tx_fragment_pkt_num)}, | |
198 | {"mac_tx_undermin_pkt_num", | |
199 | HCLGE_MAC_STATS_FIELD_OFF(mac_tx_undermin_pkt_num)}, | |
200 | {"mac_tx_jabber_pkt_num", | |
201 | HCLGE_MAC_STATS_FIELD_OFF(mac_tx_jabber_pkt_num)}, | |
202 | {"mac_tx_err_all_pkt_num", | |
203 | HCLGE_MAC_STATS_FIELD_OFF(mac_tx_err_all_pkt_num)}, | |
204 | {"mac_tx_from_app_good_pkt_num", | |
205 | HCLGE_MAC_STATS_FIELD_OFF(mac_tx_from_app_good_pkt_num)}, | |
206 | {"mac_tx_from_app_bad_pkt_num", | |
207 | HCLGE_MAC_STATS_FIELD_OFF(mac_tx_from_app_bad_pkt_num)}, | |
208 | {"mac_rx_fragment_pkt_num", | |
209 | HCLGE_MAC_STATS_FIELD_OFF(mac_rx_fragment_pkt_num)}, | |
210 | {"mac_rx_undermin_pkt_num", | |
211 | HCLGE_MAC_STATS_FIELD_OFF(mac_rx_undermin_pkt_num)}, | |
212 | {"mac_rx_jabber_pkt_num", | |
213 | HCLGE_MAC_STATS_FIELD_OFF(mac_rx_jabber_pkt_num)}, | |
214 | {"mac_rx_fcs_err_pkt_num", | |
215 | HCLGE_MAC_STATS_FIELD_OFF(mac_rx_fcs_err_pkt_num)}, | |
216 | {"mac_rx_send_app_good_pkt_num", | |
217 | HCLGE_MAC_STATS_FIELD_OFF(mac_rx_send_app_good_pkt_num)}, | |
218 | {"mac_rx_send_app_bad_pkt_num", | |
219 | HCLGE_MAC_STATS_FIELD_OFF(mac_rx_send_app_bad_pkt_num)} | |
46a3df9f S |
220 | }; |
221 | ||
635bfb58 FL |
222 | static const struct hclge_mac_mgr_tbl_entry_cmd hclge_mgr_table[] = { |
223 | { | |
224 | .flags = HCLGE_MAC_MGR_MASK_VLAN_B, | |
225 | .ethter_type = cpu_to_le16(HCLGE_MAC_ETHERTYPE_LLDP), | |
226 | .mac_addr_hi32 = cpu_to_le32(htonl(0x0180C200)), | |
227 | .mac_addr_lo16 = cpu_to_le16(htons(0x000E)), | |
228 | .i_port_bitmap = 0x1, | |
229 | }, | |
230 | }; | |
231 | ||
46a3df9f S |
232 | static int hclge_mac_update_stats(struct hclge_dev *hdev) |
233 | { | |
b42874e4 | 234 | #define HCLGE_MAC_CMD_NUM 21 |
46a3df9f S |
235 | #define HCLGE_RTN_DATA_NUM 4 |
236 | ||
237 | u64 *data = (u64 *)(&hdev->hw_stats.mac_stats); | |
238 | struct hclge_desc desc[HCLGE_MAC_CMD_NUM]; | |
a90bb9a5 | 239 | __le64 *desc_data; |
46a3df9f S |
240 | int i, k, n; |
241 | int ret; | |
242 | ||
243 | hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_STATS_MAC, true); | |
244 | ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_MAC_CMD_NUM); | |
245 | if (ret) { | |
246 | dev_err(&hdev->pdev->dev, | |
247 | "Get MAC pkt stats fail, status = %d.\n", ret); | |
248 | ||
249 | return ret; | |
250 | } | |
251 | ||
252 | for (i = 0; i < HCLGE_MAC_CMD_NUM; i++) { | |
253 | if (unlikely(i == 0)) { | |
a90bb9a5 | 254 | desc_data = (__le64 *)(&desc[i].data[0]); |
46a3df9f S |
255 | n = HCLGE_RTN_DATA_NUM - 2; |
256 | } else { | |
a90bb9a5 | 257 | desc_data = (__le64 *)(&desc[i]); |
46a3df9f S |
258 | n = HCLGE_RTN_DATA_NUM; |
259 | } | |
260 | for (k = 0; k < n; k++) { | |
a90bb9a5 | 261 | *data++ += le64_to_cpu(*desc_data); |
46a3df9f S |
262 | desc_data++; |
263 | } | |
264 | } | |
265 | ||
266 | return 0; | |
267 | } | |
268 | ||
269 | static int hclge_tqps_update_stats(struct hnae3_handle *handle) | |
270 | { | |
271 | struct hnae3_knic_private_info *kinfo = &handle->kinfo; | |
272 | struct hclge_vport *vport = hclge_get_vport(handle); | |
273 | struct hclge_dev *hdev = vport->back; | |
274 | struct hnae3_queue *queue; | |
275 | struct hclge_desc desc[1]; | |
276 | struct hclge_tqp *tqp; | |
277 | int ret, i; | |
278 | ||
279 | for (i = 0; i < kinfo->num_tqps; i++) { | |
280 | queue = handle->kinfo.tqp[i]; | |
281 | tqp = container_of(queue, struct hclge_tqp, q); | |
282 | /* command : HCLGE_OPC_QUERY_IGU_STAT */ | |
283 | hclge_cmd_setup_basic_desc(&desc[0], | |
284 | HCLGE_OPC_QUERY_RX_STATUS, | |
285 | true); | |
286 | ||
a90bb9a5 | 287 | desc[0].data[0] = cpu_to_le32((tqp->index & 0x1ff)); |
46a3df9f S |
288 | ret = hclge_cmd_send(&hdev->hw, desc, 1); |
289 | if (ret) { | |
290 | dev_err(&hdev->pdev->dev, | |
291 | "Query tqp stat fail, status = %d,queue = %d\n", | |
292 | ret, i); | |
293 | return ret; | |
294 | } | |
295 | tqp->tqp_stats.rcb_rx_ring_pktnum_rcd += | |
93991b65 | 296 | le32_to_cpu(desc[0].data[1]); |
46a3df9f S |
297 | } |
298 | ||
299 | for (i = 0; i < kinfo->num_tqps; i++) { | |
300 | queue = handle->kinfo.tqp[i]; | |
301 | tqp = container_of(queue, struct hclge_tqp, q); | |
302 | /* command : HCLGE_OPC_QUERY_IGU_STAT */ | |
303 | hclge_cmd_setup_basic_desc(&desc[0], | |
304 | HCLGE_OPC_QUERY_TX_STATUS, | |
305 | true); | |
306 | ||
a90bb9a5 | 307 | desc[0].data[0] = cpu_to_le32((tqp->index & 0x1ff)); |
46a3df9f S |
308 | ret = hclge_cmd_send(&hdev->hw, desc, 1); |
309 | if (ret) { | |
310 | dev_err(&hdev->pdev->dev, | |
311 | "Query tqp stat fail, status = %d,queue = %d\n", | |
312 | ret, i); | |
313 | return ret; | |
314 | } | |
315 | tqp->tqp_stats.rcb_tx_ring_pktnum_rcd += | |
93991b65 | 316 | le32_to_cpu(desc[0].data[1]); |
46a3df9f S |
317 | } |
318 | ||
319 | return 0; | |
320 | } | |
321 | ||
322 | static u64 *hclge_tqps_get_stats(struct hnae3_handle *handle, u64 *data) | |
323 | { | |
324 | struct hnae3_knic_private_info *kinfo = &handle->kinfo; | |
325 | struct hclge_tqp *tqp; | |
326 | u64 *buff = data; | |
327 | int i; | |
328 | ||
329 | for (i = 0; i < kinfo->num_tqps; i++) { | |
330 | tqp = container_of(kinfo->tqp[i], struct hclge_tqp, q); | |
a90bb9a5 | 331 | *buff++ = tqp->tqp_stats.rcb_tx_ring_pktnum_rcd; |
46a3df9f S |
332 | } |
333 | ||
334 | for (i = 0; i < kinfo->num_tqps; i++) { | |
335 | tqp = container_of(kinfo->tqp[i], struct hclge_tqp, q); | |
a90bb9a5 | 336 | *buff++ = tqp->tqp_stats.rcb_rx_ring_pktnum_rcd; |
46a3df9f S |
337 | } |
338 | ||
339 | return buff; | |
340 | } | |
341 | ||
342 | static int hclge_tqps_get_sset_count(struct hnae3_handle *handle, int stringset) | |
343 | { | |
344 | struct hnae3_knic_private_info *kinfo = &handle->kinfo; | |
345 | ||
346 | return kinfo->num_tqps * (2); | |
347 | } | |
348 | ||
349 | static u8 *hclge_tqps_get_strings(struct hnae3_handle *handle, u8 *data) | |
350 | { | |
351 | struct hnae3_knic_private_info *kinfo = &handle->kinfo; | |
352 | u8 *buff = data; | |
353 | int i = 0; | |
354 | ||
355 | for (i = 0; i < kinfo->num_tqps; i++) { | |
356 | struct hclge_tqp *tqp = container_of(handle->kinfo.tqp[i], | |
357 | struct hclge_tqp, q); | |
eedff8c0 | 358 | snprintf(buff, ETH_GSTRING_LEN, "txq%d_pktnum_rcd", |
46a3df9f S |
359 | tqp->index); |
360 | buff = buff + ETH_GSTRING_LEN; | |
361 | } | |
362 | ||
363 | for (i = 0; i < kinfo->num_tqps; i++) { | |
364 | struct hclge_tqp *tqp = container_of(kinfo->tqp[i], | |
365 | struct hclge_tqp, q); | |
eedff8c0 | 366 | snprintf(buff, ETH_GSTRING_LEN, "rxq%d_pktnum_rcd", |
46a3df9f S |
367 | tqp->index); |
368 | buff = buff + ETH_GSTRING_LEN; | |
369 | } | |
370 | ||
371 | return buff; | |
372 | } | |
373 | ||
374 | static u64 *hclge_comm_get_stats(void *comm_stats, | |
375 | const struct hclge_comm_stats_str strs[], | |
376 | int size, u64 *data) | |
377 | { | |
378 | u64 *buf = data; | |
379 | u32 i; | |
380 | ||
381 | for (i = 0; i < size; i++) | |
382 | buf[i] = HCLGE_STATS_READ(comm_stats, strs[i].offset); | |
383 | ||
384 | return buf + size; | |
385 | } | |
386 | ||
387 | static u8 *hclge_comm_get_strings(u32 stringset, | |
388 | const struct hclge_comm_stats_str strs[], | |
389 | int size, u8 *data) | |
390 | { | |
391 | char *buff = (char *)data; | |
392 | u32 i; | |
393 | ||
394 | if (stringset != ETH_SS_STATS) | |
395 | return buff; | |
396 | ||
397 | for (i = 0; i < size; i++) { | |
398 | snprintf(buff, ETH_GSTRING_LEN, | |
399 | strs[i].desc); | |
400 | buff = buff + ETH_GSTRING_LEN; | |
401 | } | |
402 | ||
403 | return (u8 *)buff; | |
404 | } | |
405 | ||
406 | static void hclge_update_netstat(struct hclge_hw_stats *hw_stats, | |
407 | struct net_device_stats *net_stats) | |
408 | { | |
409 | net_stats->tx_dropped = 0; | |
f3426583 | 410 | net_stats->rx_errors = hw_stats->mac_stats.mac_rx_oversize_pkt_num; |
46a3df9f | 411 | net_stats->rx_errors += hw_stats->mac_stats.mac_rx_undersize_pkt_num; |
c36317be | 412 | net_stats->rx_errors += hw_stats->mac_stats.mac_rx_fcs_err_pkt_num; |
46a3df9f S |
413 | |
414 | net_stats->multicast = hw_stats->mac_stats.mac_tx_multi_pkt_num; | |
415 | net_stats->multicast += hw_stats->mac_stats.mac_rx_multi_pkt_num; | |
416 | ||
c36317be | 417 | net_stats->rx_crc_errors = hw_stats->mac_stats.mac_rx_fcs_err_pkt_num; |
46a3df9f S |
418 | net_stats->rx_length_errors = |
419 | hw_stats->mac_stats.mac_rx_undersize_pkt_num; | |
420 | net_stats->rx_length_errors += | |
f3426583 | 421 | hw_stats->mac_stats.mac_rx_oversize_pkt_num; |
46a3df9f | 422 | net_stats->rx_over_errors = |
f3426583 | 423 | hw_stats->mac_stats.mac_rx_oversize_pkt_num; |
46a3df9f S |
424 | } |
425 | ||
426 | static void hclge_update_stats_for_all(struct hclge_dev *hdev) | |
427 | { | |
428 | struct hnae3_handle *handle; | |
429 | int status; | |
430 | ||
431 | handle = &hdev->vport[0].nic; | |
432 | if (handle->client) { | |
433 | status = hclge_tqps_update_stats(handle); | |
434 | if (status) { | |
435 | dev_err(&hdev->pdev->dev, | |
436 | "Update TQPS stats fail, status = %d.\n", | |
437 | status); | |
438 | } | |
439 | } | |
440 | ||
441 | status = hclge_mac_update_stats(hdev); | |
442 | if (status) | |
443 | dev_err(&hdev->pdev->dev, | |
444 | "Update MAC stats fail, status = %d.\n", status); | |
445 | ||
46a3df9f S |
446 | hclge_update_netstat(&hdev->hw_stats, &handle->kinfo.netdev->stats); |
447 | } | |
448 | ||
449 | static void hclge_update_stats(struct hnae3_handle *handle, | |
450 | struct net_device_stats *net_stats) | |
451 | { | |
452 | struct hclge_vport *vport = hclge_get_vport(handle); | |
453 | struct hclge_dev *hdev = vport->back; | |
454 | struct hclge_hw_stats *hw_stats = &hdev->hw_stats; | |
455 | int status; | |
456 | ||
7a5d2a39 JS |
457 | if (test_and_set_bit(HCLGE_STATE_STATISTICS_UPDATING, &hdev->state)) |
458 | return; | |
459 | ||
46a3df9f S |
460 | status = hclge_mac_update_stats(hdev); |
461 | if (status) | |
462 | dev_err(&hdev->pdev->dev, | |
463 | "Update MAC stats fail, status = %d.\n", | |
464 | status); | |
465 | ||
46a3df9f S |
466 | status = hclge_tqps_update_stats(handle); |
467 | if (status) | |
468 | dev_err(&hdev->pdev->dev, | |
469 | "Update TQPS stats fail, status = %d.\n", | |
470 | status); | |
471 | ||
472 | hclge_update_netstat(hw_stats, net_stats); | |
7a5d2a39 JS |
473 | |
474 | clear_bit(HCLGE_STATE_STATISTICS_UPDATING, &hdev->state); | |
46a3df9f S |
475 | } |
476 | ||
477 | static int hclge_get_sset_count(struct hnae3_handle *handle, int stringset) | |
478 | { | |
86957272 FL |
479 | #define HCLGE_LOOPBACK_TEST_FLAGS (HNAE3_SUPPORT_APP_LOOPBACK |\ |
480 | HNAE3_SUPPORT_PHY_LOOPBACK |\ | |
481 | HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK |\ | |
482 | HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK) | |
46a3df9f S |
483 | |
484 | struct hclge_vport *vport = hclge_get_vport(handle); | |
485 | struct hclge_dev *hdev = vport->back; | |
486 | int count = 0; | |
487 | ||
488 | /* Loopback test support rules: | |
489 | * mac: only GE mode support | |
490 | * serdes: all mac mode will support include GE/XGE/LGE/CGE | |
491 | * phy: only support when phy device exist on board | |
492 | */ | |
493 | if (stringset == ETH_SS_TEST) { | |
494 | /* clear loopback bit flags at first */ | |
495 | handle->flags = (handle->flags & (~HCLGE_LOOPBACK_TEST_FLAGS)); | |
735f1df8 | 496 | if (hdev->pdev->revision >= 0x21 || |
86957272 | 497 | hdev->hw.mac.speed == HCLGE_MAC_SPEED_10M || |
46a3df9f S |
498 | hdev->hw.mac.speed == HCLGE_MAC_SPEED_100M || |
499 | hdev->hw.mac.speed == HCLGE_MAC_SPEED_1G) { | |
500 | count += 1; | |
67b8c316 | 501 | handle->flags |= HNAE3_SUPPORT_APP_LOOPBACK; |
46a3df9f | 502 | } |
e006bb00 | 503 | |
86957272 FL |
504 | count += 2; |
505 | handle->flags |= HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK; | |
506 | handle->flags |= HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK; | |
46a3df9f S |
507 | } else if (stringset == ETH_SS_STATS) { |
508 | count = ARRAY_SIZE(g_mac_stats_string) + | |
46a3df9f S |
509 | hclge_tqps_get_sset_count(handle, stringset); |
510 | } | |
511 | ||
512 | return count; | |
513 | } | |
514 | ||
515 | static void hclge_get_strings(struct hnae3_handle *handle, | |
516 | u32 stringset, | |
517 | u8 *data) | |
518 | { | |
519 | u8 *p = (char *)data; | |
520 | int size; | |
521 | ||
522 | if (stringset == ETH_SS_STATS) { | |
523 | size = ARRAY_SIZE(g_mac_stats_string); | |
524 | p = hclge_comm_get_strings(stringset, | |
525 | g_mac_stats_string, | |
526 | size, | |
527 | p); | |
46a3df9f S |
528 | p = hclge_tqps_get_strings(handle, p); |
529 | } else if (stringset == ETH_SS_TEST) { | |
67b8c316 | 530 | if (handle->flags & HNAE3_SUPPORT_APP_LOOPBACK) { |
46a3df9f | 531 | memcpy(p, |
67b8c316 | 532 | hns3_nic_test_strs[HNAE3_LOOP_APP], |
46a3df9f S |
533 | ETH_GSTRING_LEN); |
534 | p += ETH_GSTRING_LEN; | |
535 | } | |
86957272 | 536 | if (handle->flags & HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK) { |
46a3df9f | 537 | memcpy(p, |
86957272 FL |
538 | hns3_nic_test_strs[HNAE3_LOOP_SERIAL_SERDES], |
539 | ETH_GSTRING_LEN); | |
540 | p += ETH_GSTRING_LEN; | |
541 | } | |
542 | if (handle->flags & HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK) { | |
543 | memcpy(p, | |
544 | hns3_nic_test_strs[HNAE3_LOOP_PARALLEL_SERDES], | |
46a3df9f S |
545 | ETH_GSTRING_LEN); |
546 | p += ETH_GSTRING_LEN; | |
547 | } | |
548 | if (handle->flags & HNAE3_SUPPORT_PHY_LOOPBACK) { | |
549 | memcpy(p, | |
e05cfaaf | 550 | hns3_nic_test_strs[HNAE3_LOOP_PHY], |
46a3df9f S |
551 | ETH_GSTRING_LEN); |
552 | p += ETH_GSTRING_LEN; | |
553 | } | |
554 | } | |
555 | } | |
556 | ||
557 | static void hclge_get_stats(struct hnae3_handle *handle, u64 *data) | |
558 | { | |
559 | struct hclge_vport *vport = hclge_get_vport(handle); | |
560 | struct hclge_dev *hdev = vport->back; | |
561 | u64 *p; | |
562 | ||
563 | p = hclge_comm_get_stats(&hdev->hw_stats.mac_stats, | |
564 | g_mac_stats_string, | |
565 | ARRAY_SIZE(g_mac_stats_string), | |
566 | data); | |
46a3df9f S |
567 | p = hclge_tqps_get_stats(handle, p); |
568 | } | |
569 | ||
570 | static int hclge_parse_func_status(struct hclge_dev *hdev, | |
d44f9b63 | 571 | struct hclge_func_status_cmd *status) |
46a3df9f S |
572 | { |
573 | if (!(status->pf_state & HCLGE_PF_STATE_DONE)) | |
574 | return -EINVAL; | |
575 | ||
576 | /* Set the pf to main pf */ | |
577 | if (status->pf_state & HCLGE_PF_STATE_MAIN) | |
578 | hdev->flag |= HCLGE_FLAG_MAIN; | |
579 | else | |
580 | hdev->flag &= ~HCLGE_FLAG_MAIN; | |
581 | ||
46a3df9f S |
582 | return 0; |
583 | } | |
584 | ||
585 | static int hclge_query_function_status(struct hclge_dev *hdev) | |
586 | { | |
d44f9b63 | 587 | struct hclge_func_status_cmd *req; |
46a3df9f S |
588 | struct hclge_desc desc; |
589 | int timeout = 0; | |
590 | int ret; | |
591 | ||
592 | hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_FUNC_STATUS, true); | |
d44f9b63 | 593 | req = (struct hclge_func_status_cmd *)desc.data; |
46a3df9f S |
594 | |
595 | do { | |
596 | ret = hclge_cmd_send(&hdev->hw, &desc, 1); | |
597 | if (ret) { | |
598 | dev_err(&hdev->pdev->dev, | |
599 | "query function status failed %d.\n", | |
600 | ret); | |
601 | ||
602 | return ret; | |
603 | } | |
604 | ||
605 | /* Check pf reset is done */ | |
606 | if (req->pf_state) | |
607 | break; | |
608 | usleep_range(1000, 2000); | |
609 | } while (timeout++ < 5); | |
610 | ||
611 | ret = hclge_parse_func_status(hdev, req); | |
612 | ||
613 | return ret; | |
614 | } | |
615 | ||
616 | static int hclge_query_pf_resource(struct hclge_dev *hdev) | |
617 | { | |
d44f9b63 | 618 | struct hclge_pf_res_cmd *req; |
46a3df9f S |
619 | struct hclge_desc desc; |
620 | int ret; | |
621 | ||
622 | hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_PF_RSRC, true); | |
623 | ret = hclge_cmd_send(&hdev->hw, &desc, 1); | |
624 | if (ret) { | |
625 | dev_err(&hdev->pdev->dev, | |
626 | "query pf resource failed %d.\n", ret); | |
627 | return ret; | |
628 | } | |
629 | ||
d44f9b63 | 630 | req = (struct hclge_pf_res_cmd *)desc.data; |
46a3df9f S |
631 | hdev->num_tqps = __le16_to_cpu(req->tqp_num); |
632 | hdev->pkt_buf_size = __le16_to_cpu(req->buf_size) << HCLGE_BUF_UNIT_S; | |
633 | ||
e92a0843 | 634 | if (hnae3_dev_roce_supported(hdev)) { |
5355e6d3 JS |
635 | hdev->roce_base_msix_offset = |
636 | hnae3_get_field(__le16_to_cpu(req->msixcap_localid_ba_rocee), | |
637 | HCLGE_MSIX_OFT_ROCEE_M, HCLGE_MSIX_OFT_ROCEE_S); | |
887c3820 | 638 | hdev->num_roce_msi = |
ccc23ef3 PL |
639 | hnae3_get_field(__le16_to_cpu(req->pf_intr_vector_number), |
640 | HCLGE_PF_VEC_NUM_M, HCLGE_PF_VEC_NUM_S); | |
46a3df9f S |
641 | |
642 | /* PF should have NIC vectors and Roce vectors, | |
643 | * NIC vectors are queued before Roce vectors. | |
644 | */ | |
5355e6d3 JS |
645 | hdev->num_msi = hdev->num_roce_msi + |
646 | hdev->roce_base_msix_offset; | |
46a3df9f S |
647 | } else { |
648 | hdev->num_msi = | |
ccc23ef3 PL |
649 | hnae3_get_field(__le16_to_cpu(req->pf_intr_vector_number), |
650 | HCLGE_PF_VEC_NUM_M, HCLGE_PF_VEC_NUM_S); | |
46a3df9f S |
651 | } |
652 | ||
653 | return 0; | |
654 | } | |
655 | ||
656 | static int hclge_parse_speed(int speed_cmd, int *speed) | |
657 | { | |
658 | switch (speed_cmd) { | |
659 | case 6: | |
660 | *speed = HCLGE_MAC_SPEED_10M; | |
661 | break; | |
662 | case 7: | |
663 | *speed = HCLGE_MAC_SPEED_100M; | |
664 | break; | |
665 | case 0: | |
666 | *speed = HCLGE_MAC_SPEED_1G; | |
667 | break; | |
668 | case 1: | |
669 | *speed = HCLGE_MAC_SPEED_10G; | |
670 | break; | |
671 | case 2: | |
672 | *speed = HCLGE_MAC_SPEED_25G; | |
673 | break; | |
674 | case 3: | |
675 | *speed = HCLGE_MAC_SPEED_40G; | |
676 | break; | |
677 | case 4: | |
678 | *speed = HCLGE_MAC_SPEED_50G; | |
679 | break; | |
680 | case 5: | |
681 | *speed = HCLGE_MAC_SPEED_100G; | |
682 | break; | |
683 | default: | |
684 | return -EINVAL; | |
685 | } | |
686 | ||
687 | return 0; | |
688 | } | |
689 | ||
d92ceae9 FL |
690 | static void hclge_parse_fiber_link_mode(struct hclge_dev *hdev, |
691 | u8 speed_ability) | |
692 | { | |
693 | unsigned long *supported = hdev->hw.mac.supported; | |
694 | ||
695 | if (speed_ability & HCLGE_SUPPORT_1G_BIT) | |
696 | set_bit(ETHTOOL_LINK_MODE_1000baseX_Full_BIT, | |
697 | supported); | |
698 | ||
699 | if (speed_ability & HCLGE_SUPPORT_10G_BIT) | |
700 | set_bit(ETHTOOL_LINK_MODE_10000baseSR_Full_BIT, | |
701 | supported); | |
702 | ||
703 | if (speed_ability & HCLGE_SUPPORT_25G_BIT) | |
704 | set_bit(ETHTOOL_LINK_MODE_25000baseSR_Full_BIT, | |
705 | supported); | |
706 | ||
707 | if (speed_ability & HCLGE_SUPPORT_50G_BIT) | |
708 | set_bit(ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT, | |
709 | supported); | |
710 | ||
711 | if (speed_ability & HCLGE_SUPPORT_100G_BIT) | |
712 | set_bit(ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT, | |
713 | supported); | |
714 | ||
715 | set_bit(ETHTOOL_LINK_MODE_FIBRE_BIT, supported); | |
716 | set_bit(ETHTOOL_LINK_MODE_Pause_BIT, supported); | |
717 | } | |
718 | ||
719 | static void hclge_parse_link_mode(struct hclge_dev *hdev, u8 speed_ability) | |
720 | { | |
721 | u8 media_type = hdev->hw.mac.media_type; | |
722 | ||
723 | if (media_type != HNAE3_MEDIA_TYPE_FIBER) | |
724 | return; | |
725 | ||
726 | hclge_parse_fiber_link_mode(hdev, speed_ability); | |
727 | } | |
728 | ||
46a3df9f S |
729 | static void hclge_parse_cfg(struct hclge_cfg *cfg, struct hclge_desc *desc) |
730 | { | |
d44f9b63 | 731 | struct hclge_cfg_param_cmd *req; |
46a3df9f S |
732 | u64 mac_addr_tmp_high; |
733 | u64 mac_addr_tmp; | |
734 | int i; | |
735 | ||
d44f9b63 | 736 | req = (struct hclge_cfg_param_cmd *)desc[0].data; |
46a3df9f S |
737 | |
738 | /* get the configuration */ | |
ccc23ef3 PL |
739 | cfg->vmdq_vport_num = hnae3_get_field(__le32_to_cpu(req->param[0]), |
740 | HCLGE_CFG_VMDQ_M, | |
741 | HCLGE_CFG_VMDQ_S); | |
742 | cfg->tc_num = hnae3_get_field(__le32_to_cpu(req->param[0]), | |
743 | HCLGE_CFG_TC_NUM_M, HCLGE_CFG_TC_NUM_S); | |
744 | cfg->tqp_desc_num = hnae3_get_field(__le32_to_cpu(req->param[0]), | |
745 | HCLGE_CFG_TQP_DESC_N_M, | |
746 | HCLGE_CFG_TQP_DESC_N_S); | |
747 | ||
748 | cfg->phy_addr = hnae3_get_field(__le32_to_cpu(req->param[1]), | |
749 | HCLGE_CFG_PHY_ADDR_M, | |
750 | HCLGE_CFG_PHY_ADDR_S); | |
751 | cfg->media_type = hnae3_get_field(__le32_to_cpu(req->param[1]), | |
752 | HCLGE_CFG_MEDIA_TP_M, | |
753 | HCLGE_CFG_MEDIA_TP_S); | |
754 | cfg->rx_buf_len = hnae3_get_field(__le32_to_cpu(req->param[1]), | |
755 | HCLGE_CFG_RX_BUF_LEN_M, | |
756 | HCLGE_CFG_RX_BUF_LEN_S); | |
46a3df9f S |
757 | /* get mac_address */ |
758 | mac_addr_tmp = __le32_to_cpu(req->param[2]); | |
ccc23ef3 PL |
759 | mac_addr_tmp_high = hnae3_get_field(__le32_to_cpu(req->param[3]), |
760 | HCLGE_CFG_MAC_ADDR_H_M, | |
761 | HCLGE_CFG_MAC_ADDR_H_S); | |
46a3df9f S |
762 | |
763 | mac_addr_tmp |= (mac_addr_tmp_high << 31) << 1; | |
764 | ||
ccc23ef3 PL |
765 | cfg->default_speed = hnae3_get_field(__le32_to_cpu(req->param[3]), |
766 | HCLGE_CFG_DEFAULT_SPEED_M, | |
767 | HCLGE_CFG_DEFAULT_SPEED_S); | |
768 | cfg->rss_size_max = hnae3_get_field(__le32_to_cpu(req->param[3]), | |
769 | HCLGE_CFG_RSS_SIZE_M, | |
770 | HCLGE_CFG_RSS_SIZE_S); | |
c408e202 | 771 | |
46a3df9f S |
772 | for (i = 0; i < ETH_ALEN; i++) |
773 | cfg->mac_addr[i] = (mac_addr_tmp >> (8 * i)) & 0xff; | |
774 | ||
d44f9b63 | 775 | req = (struct hclge_cfg_param_cmd *)desc[1].data; |
46a3df9f | 776 | cfg->numa_node_map = __le32_to_cpu(req->param[0]); |
d92ceae9 | 777 | |
ccc23ef3 PL |
778 | cfg->speed_ability = hnae3_get_field(__le32_to_cpu(req->param[1]), |
779 | HCLGE_CFG_SPEED_ABILITY_M, | |
780 | HCLGE_CFG_SPEED_ABILITY_S); | |
2da5ec58 JS |
781 | cfg->umv_space = hnae3_get_field(__le32_to_cpu(req->param[1]), |
782 | HCLGE_CFG_UMV_TBL_SPACE_M, | |
783 | HCLGE_CFG_UMV_TBL_SPACE_S); | |
784 | if (!cfg->umv_space) | |
785 | cfg->umv_space = HCLGE_DEFAULT_UMV_SPACE_PER_PF; | |
46a3df9f S |
786 | } |
787 | ||
788 | /* hclge_get_cfg: query the static parameter from flash | |
789 | * @hdev: pointer to struct hclge_dev | |
790 | * @hcfg: the config structure to be getted | |
791 | */ | |
792 | static int hclge_get_cfg(struct hclge_dev *hdev, struct hclge_cfg *hcfg) | |
793 | { | |
794 | struct hclge_desc desc[HCLGE_PF_CFG_DESC_NUM]; | |
d44f9b63 | 795 | struct hclge_cfg_param_cmd *req; |
46a3df9f S |
796 | int i, ret; |
797 | ||
798 | for (i = 0; i < HCLGE_PF_CFG_DESC_NUM; i++) { | |
a90bb9a5 YL |
799 | u32 offset = 0; |
800 | ||
d44f9b63 | 801 | req = (struct hclge_cfg_param_cmd *)desc[i].data; |
46a3df9f S |
802 | hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_GET_CFG_PARAM, |
803 | true); | |
ccc23ef3 PL |
804 | hnae3_set_field(offset, HCLGE_CFG_OFFSET_M, |
805 | HCLGE_CFG_OFFSET_S, i * HCLGE_CFG_RD_LEN_BYTES); | |
46a3df9f | 806 | /* Len should be united by 4 bytes when send to hardware */ |
ccc23ef3 PL |
807 | hnae3_set_field(offset, HCLGE_CFG_RD_LEN_M, HCLGE_CFG_RD_LEN_S, |
808 | HCLGE_CFG_RD_LEN_BYTES / HCLGE_CFG_RD_LEN_UNIT); | |
a90bb9a5 | 809 | req->offset = cpu_to_le32(offset); |
46a3df9f S |
810 | } |
811 | ||
812 | ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_PF_CFG_DESC_NUM); | |
813 | if (ret) { | |
90415e85 | 814 | dev_err(&hdev->pdev->dev, "get config failed %d.\n", ret); |
46a3df9f S |
815 | return ret; |
816 | } | |
817 | ||
818 | hclge_parse_cfg(hcfg, desc); | |
90415e85 | 819 | |
46a3df9f S |
820 | return 0; |
821 | } | |
822 | ||
823 | static int hclge_get_cap(struct hclge_dev *hdev) | |
824 | { | |
825 | int ret; | |
826 | ||
827 | ret = hclge_query_function_status(hdev); | |
828 | if (ret) { | |
829 | dev_err(&hdev->pdev->dev, | |
830 | "query function status error %d.\n", ret); | |
831 | return ret; | |
832 | } | |
833 | ||
834 | /* get pf resource */ | |
835 | ret = hclge_query_pf_resource(hdev); | |
90415e85 JS |
836 | if (ret) |
837 | dev_err(&hdev->pdev->dev, "query pf resource error %d.\n", ret); | |
46a3df9f | 838 | |
90415e85 | 839 | return ret; |
46a3df9f S |
840 | } |
841 | ||
842 | static int hclge_configure(struct hclge_dev *hdev) | |
843 | { | |
844 | struct hclge_cfg cfg; | |
845 | int ret, i; | |
846 | ||
847 | ret = hclge_get_cfg(hdev, &cfg); | |
848 | if (ret) { | |
849 | dev_err(&hdev->pdev->dev, "get mac mode error %d.\n", ret); | |
850 | return ret; | |
851 | } | |
852 | ||
853 | hdev->num_vmdq_vport = cfg.vmdq_vport_num; | |
854 | hdev->base_tqp_pid = 0; | |
c408e202 | 855 | hdev->rss_size_max = cfg.rss_size_max; |
46a3df9f | 856 | hdev->rx_buf_len = cfg.rx_buf_len; |
fbbb1536 | 857 | ether_addr_copy(hdev->hw.mac.mac_addr, cfg.mac_addr); |
46a3df9f | 858 | hdev->hw.mac.media_type = cfg.media_type; |
2a4776e1 | 859 | hdev->hw.mac.phy_addr = cfg.phy_addr; |
46a3df9f S |
860 | hdev->num_desc = cfg.tqp_desc_num; |
861 | hdev->tm_info.num_pg = 1; | |
cacde272 | 862 | hdev->tc_max = cfg.tc_num; |
46a3df9f | 863 | hdev->tm_info.hw_pfc_map = 0; |
2da5ec58 | 864 | hdev->wanted_umv_size = cfg.umv_space; |
46a3df9f S |
865 | |
866 | ret = hclge_parse_speed(cfg.default_speed, &hdev->hw.mac.speed); | |
867 | if (ret) { | |
868 | dev_err(&hdev->pdev->dev, "Get wrong speed ret=%d.\n", ret); | |
869 | return ret; | |
870 | } | |
871 | ||
d92ceae9 FL |
872 | hclge_parse_link_mode(hdev, cfg.speed_ability); |
873 | ||
cacde272 YL |
874 | if ((hdev->tc_max > HNAE3_MAX_TC) || |
875 | (hdev->tc_max < 1)) { | |
46a3df9f | 876 | dev_warn(&hdev->pdev->dev, "TC num = %d.\n", |
cacde272 YL |
877 | hdev->tc_max); |
878 | hdev->tc_max = 1; | |
46a3df9f S |
879 | } |
880 | ||
cacde272 YL |
881 | /* Dev does not support DCB */ |
882 | if (!hnae3_dev_dcb_supported(hdev)) { | |
883 | hdev->tc_max = 1; | |
884 | hdev->pfc_max = 0; | |
885 | } else { | |
886 | hdev->pfc_max = hdev->tc_max; | |
887 | } | |
888 | ||
889 | hdev->tm_info.num_tc = hdev->tc_max; | |
890 | ||
46a3df9f | 891 | /* Currently not support uncontiuous tc */ |
cacde272 | 892 | for (i = 0; i < hdev->tm_info.num_tc; i++) |
ccc23ef3 | 893 | hnae3_set_bit(hdev->hw_tc_map, i, 1); |
46a3df9f | 894 | |
f8362fe1 | 895 | hdev->tx_sch_mode = HCLGE_FLAG_TC_BASE_SCH_MODE; |
46a3df9f S |
896 | |
897 | return ret; | |
898 | } | |
899 | ||
900 | static int hclge_config_tso(struct hclge_dev *hdev, int tso_mss_min, | |
901 | int tso_mss_max) | |
902 | { | |
d44f9b63 | 903 | struct hclge_cfg_tso_status_cmd *req; |
46a3df9f | 904 | struct hclge_desc desc; |
a90bb9a5 | 905 | u16 tso_mss; |
46a3df9f S |
906 | |
907 | hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TSO_GENERIC_CONFIG, false); | |
908 | ||
d44f9b63 | 909 | req = (struct hclge_cfg_tso_status_cmd *)desc.data; |
a90bb9a5 YL |
910 | |
911 | tso_mss = 0; | |
ccc23ef3 PL |
912 | hnae3_set_field(tso_mss, HCLGE_TSO_MSS_MIN_M, |
913 | HCLGE_TSO_MSS_MIN_S, tso_mss_min); | |
a90bb9a5 YL |
914 | req->tso_mss_min = cpu_to_le16(tso_mss); |
915 | ||
916 | tso_mss = 0; | |
ccc23ef3 PL |
917 | hnae3_set_field(tso_mss, HCLGE_TSO_MSS_MIN_M, |
918 | HCLGE_TSO_MSS_MIN_S, tso_mss_max); | |
a90bb9a5 | 919 | req->tso_mss_max = cpu_to_le16(tso_mss); |
46a3df9f S |
920 | |
921 | return hclge_cmd_send(&hdev->hw, &desc, 1); | |
922 | } | |
923 | ||
73f88b00 PL |
924 | static int hclge_config_gro(struct hclge_dev *hdev, bool en) |
925 | { | |
926 | struct hclge_cfg_gro_status_cmd *req; | |
927 | struct hclge_desc desc; | |
928 | int ret; | |
929 | ||
930 | if (!hnae3_dev_gro_supported(hdev)) | |
931 | return 0; | |
932 | ||
933 | hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GRO_GENERIC_CONFIG, false); | |
934 | req = (struct hclge_cfg_gro_status_cmd *)desc.data; | |
935 | ||
936 | req->gro_en = cpu_to_le16(en ? 1 : 0); | |
937 | ||
938 | ret = hclge_cmd_send(&hdev->hw, &desc, 1); | |
939 | if (ret) | |
940 | dev_err(&hdev->pdev->dev, | |
941 | "GRO hardware config cmd failed, ret = %d\n", ret); | |
942 | ||
943 | return ret; | |
944 | } | |
945 | ||
46a3df9f S |
946 | static int hclge_alloc_tqps(struct hclge_dev *hdev) |
947 | { | |
948 | struct hclge_tqp *tqp; | |
949 | int i; | |
950 | ||
951 | hdev->htqp = devm_kcalloc(&hdev->pdev->dev, hdev->num_tqps, | |
952 | sizeof(struct hclge_tqp), GFP_KERNEL); | |
953 | if (!hdev->htqp) | |
954 | return -ENOMEM; | |
955 | ||
956 | tqp = hdev->htqp; | |
957 | ||
958 | for (i = 0; i < hdev->num_tqps; i++) { | |
959 | tqp->dev = &hdev->pdev->dev; | |
960 | tqp->index = i; | |
961 | ||
962 | tqp->q.ae_algo = &ae_algo; | |
963 | tqp->q.buf_size = hdev->rx_buf_len; | |
964 | tqp->q.desc_num = hdev->num_desc; | |
965 | tqp->q.io_base = hdev->hw.io_base + HCLGE_TQP_REG_OFFSET + | |
966 | i * HCLGE_TQP_REG_SIZE; | |
967 | ||
968 | tqp++; | |
969 | } | |
970 | ||
971 | return 0; | |
972 | } | |
973 | ||
974 | static int hclge_map_tqps_to_func(struct hclge_dev *hdev, u16 func_id, | |
975 | u16 tqp_pid, u16 tqp_vid, bool is_pf) | |
976 | { | |
d44f9b63 | 977 | struct hclge_tqp_map_cmd *req; |
46a3df9f S |
978 | struct hclge_desc desc; |
979 | int ret; | |
980 | ||
981 | hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SET_TQP_MAP, false); | |
982 | ||
d44f9b63 | 983 | req = (struct hclge_tqp_map_cmd *)desc.data; |
46a3df9f | 984 | req->tqp_id = cpu_to_le16(tqp_pid); |
a90bb9a5 | 985 | req->tqp_vf = func_id; |
46a3df9f S |
986 | req->tqp_flag = !is_pf << HCLGE_TQP_MAP_TYPE_B | |
987 | 1 << HCLGE_TQP_MAP_EN_B; | |
988 | req->tqp_vid = cpu_to_le16(tqp_vid); | |
989 | ||
990 | ret = hclge_cmd_send(&hdev->hw, &desc, 1); | |
90415e85 JS |
991 | if (ret) |
992 | dev_err(&hdev->pdev->dev, "TQP map failed %d.\n", ret); | |
46a3df9f | 993 | |
90415e85 | 994 | return ret; |
46a3df9f S |
995 | } |
996 | ||
81356b1f | 997 | static int hclge_assign_tqp(struct hclge_vport *vport) |
46a3df9f | 998 | { |
81356b1f | 999 | struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo; |
46a3df9f | 1000 | struct hclge_dev *hdev = vport->back; |
7df7dad6 | 1001 | int i, alloced; |
46a3df9f S |
1002 | |
1003 | for (i = 0, alloced = 0; i < hdev->num_tqps && | |
81356b1f | 1004 | alloced < kinfo->num_tqps; i++) { |
46a3df9f S |
1005 | if (!hdev->htqp[i].alloced) { |
1006 | hdev->htqp[i].q.handle = &vport->nic; | |
1007 | hdev->htqp[i].q.tqp_index = alloced; | |
81356b1f YL |
1008 | hdev->htqp[i].q.desc_num = kinfo->num_desc; |
1009 | kinfo->tqp[alloced] = &hdev->htqp[i].q; | |
46a3df9f | 1010 | hdev->htqp[i].alloced = true; |
46a3df9f S |
1011 | alloced++; |
1012 | } | |
1013 | } | |
81356b1f | 1014 | vport->alloc_tqps = kinfo->num_tqps; |
46a3df9f S |
1015 | |
1016 | return 0; | |
1017 | } | |
1018 | ||
81356b1f YL |
1019 | static int hclge_knic_setup(struct hclge_vport *vport, |
1020 | u16 num_tqps, u16 num_desc) | |
46a3df9f S |
1021 | { |
1022 | struct hnae3_handle *nic = &vport->nic; | |
1023 | struct hnae3_knic_private_info *kinfo = &nic->kinfo; | |
1024 | struct hclge_dev *hdev = vport->back; | |
1025 | int i, ret; | |
1026 | ||
81356b1f | 1027 | kinfo->num_desc = num_desc; |
46a3df9f S |
1028 | kinfo->rx_buf_len = hdev->rx_buf_len; |
1029 | kinfo->num_tc = min_t(u16, num_tqps, hdev->tm_info.num_tc); | |
1030 | kinfo->rss_size | |
1031 | = min_t(u16, hdev->rss_size_max, num_tqps / kinfo->num_tc); | |
1032 | kinfo->num_tqps = kinfo->rss_size * kinfo->num_tc; | |
1033 | ||
1034 | for (i = 0; i < HNAE3_MAX_TC; i++) { | |
1035 | if (hdev->hw_tc_map & BIT(i)) { | |
1036 | kinfo->tc_info[i].enable = true; | |
1037 | kinfo->tc_info[i].tqp_offset = i * kinfo->rss_size; | |
1038 | kinfo->tc_info[i].tqp_count = kinfo->rss_size; | |
1039 | kinfo->tc_info[i].tc = i; | |
1040 | } else { | |
1041 | /* Set to default queue if TC is disable */ | |
1042 | kinfo->tc_info[i].enable = false; | |
1043 | kinfo->tc_info[i].tqp_offset = 0; | |
1044 | kinfo->tc_info[i].tqp_count = 1; | |
1045 | kinfo->tc_info[i].tc = 0; | |
1046 | } | |
1047 | } | |
1048 | ||
1049 | kinfo->tqp = devm_kcalloc(&hdev->pdev->dev, kinfo->num_tqps, | |
1050 | sizeof(struct hnae3_queue *), GFP_KERNEL); | |
1051 | if (!kinfo->tqp) | |
1052 | return -ENOMEM; | |
1053 | ||
81356b1f | 1054 | ret = hclge_assign_tqp(vport); |
90415e85 | 1055 | if (ret) |
46a3df9f | 1056 | dev_err(&hdev->pdev->dev, "fail to assign TQPs %d.\n", ret); |
46a3df9f | 1057 | |
90415e85 | 1058 | return ret; |
46a3df9f S |
1059 | } |
1060 | ||
7df7dad6 L |
1061 | static int hclge_map_tqp_to_vport(struct hclge_dev *hdev, |
1062 | struct hclge_vport *vport) | |
1063 | { | |
1064 | struct hnae3_handle *nic = &vport->nic; | |
1065 | struct hnae3_knic_private_info *kinfo; | |
1066 | u16 i; | |
1067 | ||
1068 | kinfo = &nic->kinfo; | |
1069 | for (i = 0; i < kinfo->num_tqps; i++) { | |
1070 | struct hclge_tqp *q = | |
1071 | container_of(kinfo->tqp[i], struct hclge_tqp, q); | |
1072 | bool is_pf; | |
1073 | int ret; | |
1074 | ||
1075 | is_pf = !(vport->vport_id); | |
1076 | ret = hclge_map_tqps_to_func(hdev, vport->vport_id, q->index, | |
1077 | i, is_pf); | |
1078 | if (ret) | |
1079 | return ret; | |
1080 | } | |
1081 | ||
1082 | return 0; | |
1083 | } | |
1084 | ||
1085 | static int hclge_map_tqp(struct hclge_dev *hdev) | |
1086 | { | |
1087 | struct hclge_vport *vport = hdev->vport; | |
1088 | u16 i, num_vport; | |
1089 | ||
1090 | num_vport = hdev->num_vmdq_vport + hdev->num_req_vfs + 1; | |
1091 | for (i = 0; i < num_vport; i++) { | |
1092 | int ret; | |
1093 | ||
1094 | ret = hclge_map_tqp_to_vport(hdev, vport); | |
1095 | if (ret) | |
1096 | return ret; | |
1097 | ||
1098 | vport++; | |
1099 | } | |
1100 | ||
1101 | return 0; | |
1102 | } | |
1103 | ||
46a3df9f S |
1104 | static void hclge_unic_setup(struct hclge_vport *vport, u16 num_tqps) |
1105 | { | |
1106 | /* this would be initialized later */ | |
1107 | } | |
1108 | ||
1109 | static int hclge_vport_setup(struct hclge_vport *vport, u16 num_tqps) | |
1110 | { | |
1111 | struct hnae3_handle *nic = &vport->nic; | |
1112 | struct hclge_dev *hdev = vport->back; | |
1113 | int ret; | |
1114 | ||
1115 | nic->pdev = hdev->pdev; | |
1116 | nic->ae_algo = &ae_algo; | |
1117 | nic->numa_node_mask = hdev->numa_node_mask; | |
1118 | ||
1119 | if (hdev->ae_dev->dev_type == HNAE3_DEV_KNIC) { | |
81356b1f | 1120 | ret = hclge_knic_setup(vport, num_tqps, hdev->num_desc); |
46a3df9f S |
1121 | if (ret) { |
1122 | dev_err(&hdev->pdev->dev, "knic setup failed %d\n", | |
1123 | ret); | |
1124 | return ret; | |
1125 | } | |
1126 | } else { | |
1127 | hclge_unic_setup(vport, num_tqps); | |
1128 | } | |
1129 | ||
1130 | return 0; | |
1131 | } | |
1132 | ||
1133 | static int hclge_alloc_vport(struct hclge_dev *hdev) | |
1134 | { | |
1135 | struct pci_dev *pdev = hdev->pdev; | |
1136 | struct hclge_vport *vport; | |
1137 | u32 tqp_main_vport; | |
1138 | u32 tqp_per_vport; | |
1139 | int num_vport, i; | |
1140 | int ret; | |
1141 | ||
1142 | /* We need to alloc a vport for main NIC of PF */ | |
1143 | num_vport = hdev->num_vmdq_vport + hdev->num_req_vfs + 1; | |
1144 | ||
b76edfb2 HT |
1145 | if (hdev->num_tqps < num_vport) { |
1146 | dev_err(&hdev->pdev->dev, "tqps(%d) is less than vports(%d)", | |
1147 | hdev->num_tqps, num_vport); | |
1148 | return -EINVAL; | |
1149 | } | |
46a3df9f S |
1150 | |
1151 | /* Alloc the same number of TQPs for every vport */ | |
1152 | tqp_per_vport = hdev->num_tqps / num_vport; | |
1153 | tqp_main_vport = tqp_per_vport + hdev->num_tqps % num_vport; | |
1154 | ||
1155 | vport = devm_kcalloc(&pdev->dev, num_vport, sizeof(struct hclge_vport), | |
1156 | GFP_KERNEL); | |
1157 | if (!vport) | |
1158 | return -ENOMEM; | |
1159 | ||
1160 | hdev->vport = vport; | |
1161 | hdev->num_alloc_vport = num_vport; | |
1162 | ||
bc59f827 FL |
1163 | if (IS_ENABLED(CONFIG_PCI_IOV)) |
1164 | hdev->num_alloc_vfs = hdev->num_req_vfs; | |
46a3df9f S |
1165 | |
1166 | for (i = 0; i < num_vport; i++) { | |
1167 | vport->back = hdev; | |
1168 | vport->vport_id = i; | |
b2c04029 | 1169 | vport->mps = HCLGE_MAC_DEFAULT_FRAME; |
46a3df9f S |
1170 | |
1171 | if (i == 0) | |
1172 | ret = hclge_vport_setup(vport, tqp_main_vport); | |
1173 | else | |
1174 | ret = hclge_vport_setup(vport, tqp_per_vport); | |
1175 | if (ret) { | |
1176 | dev_err(&pdev->dev, | |
1177 | "vport setup failed for vport %d, %d\n", | |
1178 | i, ret); | |
1179 | return ret; | |
1180 | } | |
1181 | ||
1182 | vport++; | |
1183 | } | |
1184 | ||
1185 | return 0; | |
1186 | } | |
1187 | ||
acf61ecd YL |
1188 | static int hclge_cmd_alloc_tx_buff(struct hclge_dev *hdev, |
1189 | struct hclge_pkt_buf_alloc *buf_alloc) | |
46a3df9f S |
1190 | { |
1191 | /* TX buffer size is unit by 128 byte */ | |
1192 | #define HCLGE_BUF_SIZE_UNIT_SHIFT 7 | |
1193 | #define HCLGE_BUF_SIZE_UPDATE_EN_MSK BIT(15) | |
d44f9b63 | 1194 | struct hclge_tx_buff_alloc_cmd *req; |
46a3df9f S |
1195 | struct hclge_desc desc; |
1196 | int ret; | |
1197 | u8 i; | |
1198 | ||
d44f9b63 | 1199 | req = (struct hclge_tx_buff_alloc_cmd *)desc.data; |
46a3df9f S |
1200 | |
1201 | hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TX_BUFF_ALLOC, 0); | |
9ffe79a9 | 1202 | for (i = 0; i < HCLGE_TC_NUM; i++) { |
acf61ecd | 1203 | u32 buf_size = buf_alloc->priv_buf[i].tx_buf_size; |
9ffe79a9 | 1204 | |
46a3df9f S |
1205 | req->tx_pkt_buff[i] = |
1206 | cpu_to_le16((buf_size >> HCLGE_BUF_SIZE_UNIT_SHIFT) | | |
1207 | HCLGE_BUF_SIZE_UPDATE_EN_MSK); | |
9ffe79a9 | 1208 | } |
46a3df9f S |
1209 | |
1210 | ret = hclge_cmd_send(&hdev->hw, &desc, 1); | |
90415e85 | 1211 | if (ret) |
46a3df9f S |
1212 | dev_err(&hdev->pdev->dev, "tx buffer alloc cmd failed %d.\n", |
1213 | ret); | |
46a3df9f | 1214 | |
90415e85 | 1215 | return ret; |
46a3df9f S |
1216 | } |
1217 | ||
acf61ecd YL |
1218 | static int hclge_tx_buffer_alloc(struct hclge_dev *hdev, |
1219 | struct hclge_pkt_buf_alloc *buf_alloc) | |
46a3df9f | 1220 | { |
acf61ecd | 1221 | int ret = hclge_cmd_alloc_tx_buff(hdev, buf_alloc); |
46a3df9f | 1222 | |
90415e85 JS |
1223 | if (ret) |
1224 | dev_err(&hdev->pdev->dev, "tx buffer alloc failed %d\n", ret); | |
46a3df9f | 1225 | |
90415e85 | 1226 | return ret; |
46a3df9f S |
1227 | } |
1228 | ||
1229 | static int hclge_get_tc_num(struct hclge_dev *hdev) | |
1230 | { | |
1231 | int i, cnt = 0; | |
1232 | ||
1233 | for (i = 0; i < HCLGE_MAX_TC_NUM; i++) | |
1234 | if (hdev->hw_tc_map & BIT(i)) | |
1235 | cnt++; | |
1236 | return cnt; | |
1237 | } | |
1238 | ||
1239 | static int hclge_get_pfc_enalbe_num(struct hclge_dev *hdev) | |
1240 | { | |
1241 | int i, cnt = 0; | |
1242 | ||
1243 | for (i = 0; i < HCLGE_MAX_TC_NUM; i++) | |
1244 | if (hdev->hw_tc_map & BIT(i) && | |
1245 | hdev->tm_info.hw_pfc_map & BIT(i)) | |
1246 | cnt++; | |
1247 | return cnt; | |
1248 | } | |
1249 | ||
1250 | /* Get the number of pfc enabled TCs, which have private buffer */ | |
acf61ecd YL |
1251 | static int hclge_get_pfc_priv_num(struct hclge_dev *hdev, |
1252 | struct hclge_pkt_buf_alloc *buf_alloc) | |
46a3df9f S |
1253 | { |
1254 | struct hclge_priv_buf *priv; | |
1255 | int i, cnt = 0; | |
1256 | ||
1257 | for (i = 0; i < HCLGE_MAX_TC_NUM; i++) { | |
acf61ecd | 1258 | priv = &buf_alloc->priv_buf[i]; |
46a3df9f S |
1259 | if ((hdev->tm_info.hw_pfc_map & BIT(i)) && |
1260 | priv->enable) | |
1261 | cnt++; | |
1262 | } | |
1263 | ||
1264 | return cnt; | |
1265 | } | |
1266 | ||
1267 | /* Get the number of pfc disabled TCs, which have private buffer */ | |
acf61ecd YL |
1268 | static int hclge_get_no_pfc_priv_num(struct hclge_dev *hdev, |
1269 | struct hclge_pkt_buf_alloc *buf_alloc) | |
46a3df9f S |
1270 | { |
1271 | struct hclge_priv_buf *priv; | |
1272 | int i, cnt = 0; | |
1273 | ||
1274 | for (i = 0; i < HCLGE_MAX_TC_NUM; i++) { | |
acf61ecd | 1275 | priv = &buf_alloc->priv_buf[i]; |
46a3df9f S |
1276 | if (hdev->hw_tc_map & BIT(i) && |
1277 | !(hdev->tm_info.hw_pfc_map & BIT(i)) && | |
1278 | priv->enable) | |
1279 | cnt++; | |
1280 | } | |
1281 | ||
1282 | return cnt; | |
1283 | } | |
1284 | ||
acf61ecd | 1285 | static u32 hclge_get_rx_priv_buff_alloced(struct hclge_pkt_buf_alloc *buf_alloc) |
46a3df9f S |
1286 | { |
1287 | struct hclge_priv_buf *priv; | |
1288 | u32 rx_priv = 0; | |
1289 | int i; | |
1290 | ||
1291 | for (i = 0; i < HCLGE_MAX_TC_NUM; i++) { | |
acf61ecd | 1292 | priv = &buf_alloc->priv_buf[i]; |
46a3df9f S |
1293 | if (priv->enable) |
1294 | rx_priv += priv->buf_size; | |
1295 | } | |
1296 | return rx_priv; | |
1297 | } | |
1298 | ||
acf61ecd | 1299 | static u32 hclge_get_tx_buff_alloced(struct hclge_pkt_buf_alloc *buf_alloc) |
9ffe79a9 YL |
1300 | { |
1301 | u32 i, total_tx_size = 0; | |
1302 | ||
1303 | for (i = 0; i < HCLGE_MAX_TC_NUM; i++) | |
acf61ecd | 1304 | total_tx_size += buf_alloc->priv_buf[i].tx_buf_size; |
9ffe79a9 YL |
1305 | |
1306 | return total_tx_size; | |
1307 | } | |
1308 | ||
acf61ecd YL |
1309 | static bool hclge_is_rx_buf_ok(struct hclge_dev *hdev, |
1310 | struct hclge_pkt_buf_alloc *buf_alloc, | |
1311 | u32 rx_all) | |
46a3df9f S |
1312 | { |
1313 | u32 shared_buf_min, shared_buf_tc, shared_std; | |
1314 | int tc_num, pfc_enable_num; | |
1315 | u32 shared_buf; | |
1316 | u32 rx_priv; | |
1317 | int i; | |
1318 | ||
1319 | tc_num = hclge_get_tc_num(hdev); | |
1320 | pfc_enable_num = hclge_get_pfc_enalbe_num(hdev); | |
1321 | ||
d221df4e YL |
1322 | if (hnae3_dev_dcb_supported(hdev)) |
1323 | shared_buf_min = 2 * hdev->mps + HCLGE_DEFAULT_DV; | |
1324 | else | |
1325 | shared_buf_min = 2 * hdev->mps + HCLGE_DEFAULT_NON_DCB_DV; | |
1326 | ||
46a3df9f S |
1327 | shared_buf_tc = pfc_enable_num * hdev->mps + |
1328 | (tc_num - pfc_enable_num) * hdev->mps / 2 + | |
1329 | hdev->mps; | |
1330 | shared_std = max_t(u32, shared_buf_min, shared_buf_tc); | |
1331 | ||
acf61ecd | 1332 | rx_priv = hclge_get_rx_priv_buff_alloced(buf_alloc); |
46a3df9f S |
1333 | if (rx_all <= rx_priv + shared_std) |
1334 | return false; | |
1335 | ||
1336 | shared_buf = rx_all - rx_priv; | |
acf61ecd YL |
1337 | buf_alloc->s_buf.buf_size = shared_buf; |
1338 | buf_alloc->s_buf.self.high = shared_buf; | |
1339 | buf_alloc->s_buf.self.low = 2 * hdev->mps; | |
46a3df9f S |
1340 | |
1341 | for (i = 0; i < HCLGE_MAX_TC_NUM; i++) { | |
1342 | if ((hdev->hw_tc_map & BIT(i)) && | |
1343 | (hdev->tm_info.hw_pfc_map & BIT(i))) { | |
acf61ecd YL |
1344 | buf_alloc->s_buf.tc_thrd[i].low = hdev->mps; |
1345 | buf_alloc->s_buf.tc_thrd[i].high = 2 * hdev->mps; | |
46a3df9f | 1346 | } else { |
acf61ecd YL |
1347 | buf_alloc->s_buf.tc_thrd[i].low = 0; |
1348 | buf_alloc->s_buf.tc_thrd[i].high = hdev->mps; | |
46a3df9f S |
1349 | } |
1350 | } | |
1351 | ||
1352 | return true; | |
1353 | } | |
1354 | ||
acf61ecd YL |
1355 | static int hclge_tx_buffer_calc(struct hclge_dev *hdev, |
1356 | struct hclge_pkt_buf_alloc *buf_alloc) | |
9ffe79a9 YL |
1357 | { |
1358 | u32 i, total_size; | |
1359 | ||
1360 | total_size = hdev->pkt_buf_size; | |
1361 | ||
1362 | /* alloc tx buffer for all enabled tc */ | |
1363 | for (i = 0; i < HCLGE_MAX_TC_NUM; i++) { | |
acf61ecd | 1364 | struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i]; |
9ffe79a9 YL |
1365 | |
1366 | if (total_size < HCLGE_DEFAULT_TX_BUF) | |
1367 | return -ENOMEM; | |
1368 | ||
1369 | if (hdev->hw_tc_map & BIT(i)) | |
1370 | priv->tx_buf_size = HCLGE_DEFAULT_TX_BUF; | |
1371 | else | |
1372 | priv->tx_buf_size = 0; | |
1373 | ||
1374 | total_size -= priv->tx_buf_size; | |
1375 | } | |
1376 | ||
1377 | return 0; | |
1378 | } | |
1379 | ||
46a3df9f S |
1380 | /* hclge_rx_buffer_calc: calculate the rx private buffer size for all TCs |
1381 | * @hdev: pointer to struct hclge_dev | |
acf61ecd | 1382 | * @buf_alloc: pointer to buffer calculation data |
46a3df9f S |
1383 | * @return: 0: calculate sucessful, negative: fail |
1384 | */ | |
1db9b1bf YL |
1385 | static int hclge_rx_buffer_calc(struct hclge_dev *hdev, |
1386 | struct hclge_pkt_buf_alloc *buf_alloc) | |
46a3df9f | 1387 | { |
d748274d YL |
1388 | #define HCLGE_BUF_SIZE_UNIT 128 |
1389 | u32 rx_all = hdev->pkt_buf_size, aligned_mps; | |
46a3df9f S |
1390 | int no_pfc_priv_num, pfc_priv_num; |
1391 | struct hclge_priv_buf *priv; | |
1392 | int i; | |
1393 | ||
d748274d | 1394 | aligned_mps = round_up(hdev->mps, HCLGE_BUF_SIZE_UNIT); |
acf61ecd | 1395 | rx_all -= hclge_get_tx_buff_alloced(buf_alloc); |
9ffe79a9 | 1396 | |
d602a525 YL |
1397 | /* When DCB is not supported, rx private |
1398 | * buffer is not allocated. | |
1399 | */ | |
1400 | if (!hnae3_dev_dcb_supported(hdev)) { | |
acf61ecd | 1401 | if (!hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all)) |
d602a525 YL |
1402 | return -ENOMEM; |
1403 | ||
1404 | return 0; | |
1405 | } | |
1406 | ||
46a3df9f S |
1407 | /* step 1, try to alloc private buffer for all enabled tc */ |
1408 | for (i = 0; i < HCLGE_MAX_TC_NUM; i++) { | |
acf61ecd | 1409 | priv = &buf_alloc->priv_buf[i]; |
46a3df9f S |
1410 | if (hdev->hw_tc_map & BIT(i)) { |
1411 | priv->enable = 1; | |
1412 | if (hdev->tm_info.hw_pfc_map & BIT(i)) { | |
d748274d YL |
1413 | priv->wl.low = aligned_mps; |
1414 | priv->wl.high = priv->wl.low + aligned_mps; | |
46a3df9f S |
1415 | priv->buf_size = priv->wl.high + |
1416 | HCLGE_DEFAULT_DV; | |
1417 | } else { | |
1418 | priv->wl.low = 0; | |
d748274d | 1419 | priv->wl.high = 2 * aligned_mps; |
46a3df9f S |
1420 | priv->buf_size = priv->wl.high; |
1421 | } | |
bb1fe9ea YL |
1422 | } else { |
1423 | priv->enable = 0; | |
1424 | priv->wl.low = 0; | |
1425 | priv->wl.high = 0; | |
1426 | priv->buf_size = 0; | |
46a3df9f S |
1427 | } |
1428 | } | |
1429 | ||
acf61ecd | 1430 | if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all)) |
46a3df9f S |
1431 | return 0; |
1432 | ||
1433 | /* step 2, try to decrease the buffer size of | |
1434 | * no pfc TC's private buffer | |
1435 | */ | |
1436 | for (i = 0; i < HCLGE_MAX_TC_NUM; i++) { | |
acf61ecd | 1437 | priv = &buf_alloc->priv_buf[i]; |
46a3df9f | 1438 | |
bb1fe9ea YL |
1439 | priv->enable = 0; |
1440 | priv->wl.low = 0; | |
1441 | priv->wl.high = 0; | |
1442 | priv->buf_size = 0; | |
1443 | ||
1444 | if (!(hdev->hw_tc_map & BIT(i))) | |
1445 | continue; | |
1446 | ||
1447 | priv->enable = 1; | |
46a3df9f S |
1448 | |
1449 | if (hdev->tm_info.hw_pfc_map & BIT(i)) { | |
1450 | priv->wl.low = 128; | |
d748274d | 1451 | priv->wl.high = priv->wl.low + aligned_mps; |
46a3df9f S |
1452 | priv->buf_size = priv->wl.high + HCLGE_DEFAULT_DV; |
1453 | } else { | |
1454 | priv->wl.low = 0; | |
d748274d | 1455 | priv->wl.high = aligned_mps; |
46a3df9f S |
1456 | priv->buf_size = priv->wl.high; |
1457 | } | |
1458 | } | |
1459 | ||
acf61ecd | 1460 | if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all)) |
46a3df9f S |
1461 | return 0; |
1462 | ||
1463 | /* step 3, try to reduce the number of pfc disabled TCs, | |
1464 | * which have private buffer | |
1465 | */ | |
1466 | /* get the total no pfc enable TC number, which have private buffer */ | |
acf61ecd | 1467 | no_pfc_priv_num = hclge_get_no_pfc_priv_num(hdev, buf_alloc); |
46a3df9f S |
1468 | |
1469 | /* let the last to be cleared first */ | |
1470 | for (i = HCLGE_MAX_TC_NUM - 1; i >= 0; i--) { | |
acf61ecd | 1471 | priv = &buf_alloc->priv_buf[i]; |
46a3df9f S |
1472 | |
1473 | if (hdev->hw_tc_map & BIT(i) && | |
1474 | !(hdev->tm_info.hw_pfc_map & BIT(i))) { | |
1475 | /* Clear the no pfc TC private buffer */ | |
1476 | priv->wl.low = 0; | |
1477 | priv->wl.high = 0; | |
1478 | priv->buf_size = 0; | |
1479 | priv->enable = 0; | |
1480 | no_pfc_priv_num--; | |
1481 | } | |
1482 | ||
acf61ecd | 1483 | if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all) || |
46a3df9f S |
1484 | no_pfc_priv_num == 0) |
1485 | break; | |
1486 | } | |
1487 | ||
acf61ecd | 1488 | if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all)) |
46a3df9f S |
1489 | return 0; |
1490 | ||
1491 | /* step 4, try to reduce the number of pfc enabled TCs | |
1492 | * which have private buffer. | |
1493 | */ | |
acf61ecd | 1494 | pfc_priv_num = hclge_get_pfc_priv_num(hdev, buf_alloc); |
46a3df9f S |
1495 | |
1496 | /* let the last to be cleared first */ | |
1497 | for (i = HCLGE_MAX_TC_NUM - 1; i >= 0; i--) { | |
acf61ecd | 1498 | priv = &buf_alloc->priv_buf[i]; |
46a3df9f S |
1499 | |
1500 | if (hdev->hw_tc_map & BIT(i) && | |
1501 | hdev->tm_info.hw_pfc_map & BIT(i)) { | |
1502 | /* Reduce the number of pfc TC with private buffer */ | |
1503 | priv->wl.low = 0; | |
1504 | priv->enable = 0; | |
1505 | priv->wl.high = 0; | |
1506 | priv->buf_size = 0; | |
1507 | pfc_priv_num--; | |
1508 | } | |
1509 | ||
acf61ecd | 1510 | if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all) || |
46a3df9f S |
1511 | pfc_priv_num == 0) |
1512 | break; | |
1513 | } | |
acf61ecd | 1514 | if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all)) |
46a3df9f S |
1515 | return 0; |
1516 | ||
1517 | return -ENOMEM; | |
1518 | } | |
1519 | ||
acf61ecd YL |
1520 | static int hclge_rx_priv_buf_alloc(struct hclge_dev *hdev, |
1521 | struct hclge_pkt_buf_alloc *buf_alloc) | |
46a3df9f | 1522 | { |
d44f9b63 | 1523 | struct hclge_rx_priv_buff_cmd *req; |
46a3df9f S |
1524 | struct hclge_desc desc; |
1525 | int ret; | |
1526 | int i; | |
1527 | ||
1528 | hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_PRIV_BUFF_ALLOC, false); | |
d44f9b63 | 1529 | req = (struct hclge_rx_priv_buff_cmd *)desc.data; |
46a3df9f S |
1530 | |
1531 | /* Alloc private buffer TCs */ | |
1532 | for (i = 0; i < HCLGE_MAX_TC_NUM; i++) { | |
acf61ecd | 1533 | struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i]; |
46a3df9f S |
1534 | |
1535 | req->buf_num[i] = | |
1536 | cpu_to_le16(priv->buf_size >> HCLGE_BUF_UNIT_S); | |
1537 | req->buf_num[i] |= | |
5bca3b94 | 1538 | cpu_to_le16(1 << HCLGE_TC0_PRI_BUF_EN_B); |
46a3df9f S |
1539 | } |
1540 | ||
b8c8bf47 | 1541 | req->shared_buf = |
acf61ecd | 1542 | cpu_to_le16((buf_alloc->s_buf.buf_size >> HCLGE_BUF_UNIT_S) | |
b8c8bf47 YL |
1543 | (1 << HCLGE_TC0_PRI_BUF_EN_B)); |
1544 | ||
46a3df9f | 1545 | ret = hclge_cmd_send(&hdev->hw, &desc, 1); |
90415e85 | 1546 | if (ret) |
46a3df9f S |
1547 | dev_err(&hdev->pdev->dev, |
1548 | "rx private buffer alloc cmd failed %d\n", ret); | |
46a3df9f | 1549 | |
90415e85 | 1550 | return ret; |
46a3df9f S |
1551 | } |
1552 | ||
acf61ecd YL |
1553 | static int hclge_rx_priv_wl_config(struct hclge_dev *hdev, |
1554 | struct hclge_pkt_buf_alloc *buf_alloc) | |
46a3df9f S |
1555 | { |
1556 | struct hclge_rx_priv_wl_buf *req; | |
1557 | struct hclge_priv_buf *priv; | |
1558 | struct hclge_desc desc[2]; | |
1559 | int i, j; | |
1560 | int ret; | |
1561 | ||
1562 | for (i = 0; i < 2; i++) { | |
1563 | hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_RX_PRIV_WL_ALLOC, | |
1564 | false); | |
1565 | req = (struct hclge_rx_priv_wl_buf *)desc[i].data; | |
1566 | ||
1567 | /* The first descriptor set the NEXT bit to 1 */ | |
1568 | if (i == 0) | |
1569 | desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT); | |
1570 | else | |
1571 | desc[i].flag &= ~cpu_to_le16(HCLGE_CMD_FLAG_NEXT); | |
1572 | ||
1573 | for (j = 0; j < HCLGE_TC_NUM_ONE_DESC; j++) { | |
acf61ecd YL |
1574 | u32 idx = i * HCLGE_TC_NUM_ONE_DESC + j; |
1575 | ||
1576 | priv = &buf_alloc->priv_buf[idx]; | |
46a3df9f S |
1577 | req->tc_wl[j].high = |
1578 | cpu_to_le16(priv->wl.high >> HCLGE_BUF_UNIT_S); | |
1579 | req->tc_wl[j].high |= | |
ee6b549b | 1580 | cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B)); |
46a3df9f S |
1581 | req->tc_wl[j].low = |
1582 | cpu_to_le16(priv->wl.low >> HCLGE_BUF_UNIT_S); | |
1583 | req->tc_wl[j].low |= | |
ee6b549b | 1584 | cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B)); |
46a3df9f S |
1585 | } |
1586 | } | |
1587 | ||
1588 | /* Send 2 descriptor at one time */ | |
1589 | ret = hclge_cmd_send(&hdev->hw, desc, 2); | |
90415e85 | 1590 | if (ret) |
46a3df9f S |
1591 | dev_err(&hdev->pdev->dev, |
1592 | "rx private waterline config cmd failed %d\n", | |
1593 | ret); | |
90415e85 | 1594 | return ret; |
46a3df9f S |
1595 | } |
1596 | ||
acf61ecd YL |
1597 | static int hclge_common_thrd_config(struct hclge_dev *hdev, |
1598 | struct hclge_pkt_buf_alloc *buf_alloc) | |
46a3df9f | 1599 | { |
acf61ecd | 1600 | struct hclge_shared_buf *s_buf = &buf_alloc->s_buf; |
46a3df9f S |
1601 | struct hclge_rx_com_thrd *req; |
1602 | struct hclge_desc desc[2]; | |
1603 | struct hclge_tc_thrd *tc; | |
1604 | int i, j; | |
1605 | int ret; | |
1606 | ||
1607 | for (i = 0; i < 2; i++) { | |
1608 | hclge_cmd_setup_basic_desc(&desc[i], | |
1609 | HCLGE_OPC_RX_COM_THRD_ALLOC, false); | |
1610 | req = (struct hclge_rx_com_thrd *)&desc[i].data; | |
1611 | ||
1612 | /* The first descriptor set the NEXT bit to 1 */ | |
1613 | if (i == 0) | |
1614 | desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT); | |
1615 | else | |
1616 | desc[i].flag &= ~cpu_to_le16(HCLGE_CMD_FLAG_NEXT); | |
1617 | ||
1618 | for (j = 0; j < HCLGE_TC_NUM_ONE_DESC; j++) { | |
1619 | tc = &s_buf->tc_thrd[i * HCLGE_TC_NUM_ONE_DESC + j]; | |
1620 | ||
1621 | req->com_thrd[j].high = | |
1622 | cpu_to_le16(tc->high >> HCLGE_BUF_UNIT_S); | |
1623 | req->com_thrd[j].high |= | |
ee6b549b | 1624 | cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B)); |
46a3df9f S |
1625 | req->com_thrd[j].low = |
1626 | cpu_to_le16(tc->low >> HCLGE_BUF_UNIT_S); | |
1627 | req->com_thrd[j].low |= | |
ee6b549b | 1628 | cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B)); |
46a3df9f S |
1629 | } |
1630 | } | |
1631 | ||
1632 | /* Send 2 descriptors at one time */ | |
1633 | ret = hclge_cmd_send(&hdev->hw, desc, 2); | |
90415e85 | 1634 | if (ret) |
46a3df9f S |
1635 | dev_err(&hdev->pdev->dev, |
1636 | "common threshold config cmd failed %d\n", ret); | |
90415e85 | 1637 | return ret; |
46a3df9f S |
1638 | } |
1639 | ||
acf61ecd YL |
1640 | static int hclge_common_wl_config(struct hclge_dev *hdev, |
1641 | struct hclge_pkt_buf_alloc *buf_alloc) | |
46a3df9f | 1642 | { |
acf61ecd | 1643 | struct hclge_shared_buf *buf = &buf_alloc->s_buf; |
46a3df9f S |
1644 | struct hclge_rx_com_wl *req; |
1645 | struct hclge_desc desc; | |
1646 | int ret; | |
1647 | ||
1648 | hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_COM_WL_ALLOC, false); | |
1649 | ||
1650 | req = (struct hclge_rx_com_wl *)desc.data; | |
1651 | req->com_wl.high = cpu_to_le16(buf->self.high >> HCLGE_BUF_UNIT_S); | |
ee6b549b | 1652 | req->com_wl.high |= cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B)); |
46a3df9f S |
1653 | |
1654 | req->com_wl.low = cpu_to_le16(buf->self.low >> HCLGE_BUF_UNIT_S); | |
ee6b549b | 1655 | req->com_wl.low |= cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B)); |
46a3df9f S |
1656 | |
1657 | ret = hclge_cmd_send(&hdev->hw, &desc, 1); | |
90415e85 | 1658 | if (ret) |
46a3df9f S |
1659 | dev_err(&hdev->pdev->dev, |
1660 | "common waterline config cmd failed %d\n", ret); | |
930ff2f6 | 1661 | |
90415e85 | 1662 | return ret; |
46a3df9f S |
1663 | } |
1664 | ||
1665 | int hclge_buffer_alloc(struct hclge_dev *hdev) | |
1666 | { | |
acf61ecd | 1667 | struct hclge_pkt_buf_alloc *pkt_buf; |
46a3df9f S |
1668 | int ret; |
1669 | ||
acf61ecd YL |
1670 | pkt_buf = kzalloc(sizeof(*pkt_buf), GFP_KERNEL); |
1671 | if (!pkt_buf) | |
46a3df9f S |
1672 | return -ENOMEM; |
1673 | ||
acf61ecd | 1674 | ret = hclge_tx_buffer_calc(hdev, pkt_buf); |
9ffe79a9 YL |
1675 | if (ret) { |
1676 | dev_err(&hdev->pdev->dev, | |
1677 | "could not calc tx buffer size for all TCs %d\n", ret); | |
acf61ecd | 1678 | goto out; |
9ffe79a9 YL |
1679 | } |
1680 | ||
acf61ecd | 1681 | ret = hclge_tx_buffer_alloc(hdev, pkt_buf); |
46a3df9f S |
1682 | if (ret) { |
1683 | dev_err(&hdev->pdev->dev, | |
1684 | "could not alloc tx buffers %d\n", ret); | |
acf61ecd | 1685 | goto out; |
46a3df9f S |
1686 | } |
1687 | ||
acf61ecd | 1688 | ret = hclge_rx_buffer_calc(hdev, pkt_buf); |
46a3df9f S |
1689 | if (ret) { |
1690 | dev_err(&hdev->pdev->dev, | |
1691 | "could not calc rx priv buffer size for all TCs %d\n", | |
1692 | ret); | |
acf61ecd | 1693 | goto out; |
46a3df9f S |
1694 | } |
1695 | ||
acf61ecd | 1696 | ret = hclge_rx_priv_buf_alloc(hdev, pkt_buf); |
46a3df9f S |
1697 | if (ret) { |
1698 | dev_err(&hdev->pdev->dev, "could not alloc rx priv buffer %d\n", | |
1699 | ret); | |
acf61ecd | 1700 | goto out; |
46a3df9f S |
1701 | } |
1702 | ||
2daf4a65 | 1703 | if (hnae3_dev_dcb_supported(hdev)) { |
acf61ecd | 1704 | ret = hclge_rx_priv_wl_config(hdev, pkt_buf); |
2daf4a65 YL |
1705 | if (ret) { |
1706 | dev_err(&hdev->pdev->dev, | |
1707 | "could not configure rx private waterline %d\n", | |
1708 | ret); | |
acf61ecd | 1709 | goto out; |
2daf4a65 | 1710 | } |
46a3df9f | 1711 | |
acf61ecd | 1712 | ret = hclge_common_thrd_config(hdev, pkt_buf); |
2daf4a65 YL |
1713 | if (ret) { |
1714 | dev_err(&hdev->pdev->dev, | |
1715 | "could not configure common threshold %d\n", | |
1716 | ret); | |
acf61ecd | 1717 | goto out; |
2daf4a65 | 1718 | } |
46a3df9f S |
1719 | } |
1720 | ||
acf61ecd YL |
1721 | ret = hclge_common_wl_config(hdev, pkt_buf); |
1722 | if (ret) | |
46a3df9f S |
1723 | dev_err(&hdev->pdev->dev, |
1724 | "could not configure common waterline %d\n", ret); | |
46a3df9f | 1725 | |
acf61ecd YL |
1726 | out: |
1727 | kfree(pkt_buf); | |
1728 | return ret; | |
46a3df9f S |
1729 | } |
1730 | ||
1731 | static int hclge_init_roce_base_info(struct hclge_vport *vport) | |
1732 | { | |
1733 | struct hnae3_handle *roce = &vport->roce; | |
1734 | struct hnae3_handle *nic = &vport->nic; | |
1735 | ||
887c3820 | 1736 | roce->rinfo.num_vectors = vport->back->num_roce_msi; |
46a3df9f S |
1737 | |
1738 | if (vport->back->num_msi_left < vport->roce.rinfo.num_vectors || | |
1739 | vport->back->num_msi_left == 0) | |
1740 | return -EINVAL; | |
1741 | ||
1742 | roce->rinfo.base_vector = vport->back->roce_base_vector; | |
1743 | ||
1744 | roce->rinfo.netdev = nic->kinfo.netdev; | |
1745 | roce->rinfo.roce_io_base = vport->back->hw.io_base; | |
1746 | ||
1747 | roce->pdev = nic->pdev; | |
1748 | roce->ae_algo = nic->ae_algo; | |
1749 | roce->numa_node_mask = nic->numa_node_mask; | |
1750 | ||
1751 | return 0; | |
1752 | } | |
1753 | ||
887c3820 | 1754 | static int hclge_init_msi(struct hclge_dev *hdev) |
46a3df9f S |
1755 | { |
1756 | struct pci_dev *pdev = hdev->pdev; | |
887c3820 SM |
1757 | int vectors; |
1758 | int i; | |
46a3df9f | 1759 | |
887c3820 SM |
1760 | vectors = pci_alloc_irq_vectors(pdev, 1, hdev->num_msi, |
1761 | PCI_IRQ_MSI | PCI_IRQ_MSIX); | |
1762 | if (vectors < 0) { | |
1763 | dev_err(&pdev->dev, | |
1764 | "failed(%d) to allocate MSI/MSI-X vectors\n", | |
1765 | vectors); | |
1766 | return vectors; | |
46a3df9f | 1767 | } |
887c3820 SM |
1768 | if (vectors < hdev->num_msi) |
1769 | dev_warn(&hdev->pdev->dev, | |
1770 | "requested %d MSI/MSI-X, but allocated %d MSI/MSI-X\n", | |
1771 | hdev->num_msi, vectors); | |
46a3df9f | 1772 | |
887c3820 SM |
1773 | hdev->num_msi = vectors; |
1774 | hdev->num_msi_left = vectors; | |
1775 | hdev->base_msi_vector = pdev->irq; | |
46a3df9f | 1776 | hdev->roce_base_vector = hdev->base_msi_vector + |
5355e6d3 | 1777 | hdev->roce_base_msix_offset; |
46a3df9f | 1778 | |
46a3df9f S |
1779 | hdev->vector_status = devm_kcalloc(&pdev->dev, hdev->num_msi, |
1780 | sizeof(u16), GFP_KERNEL); | |
887c3820 SM |
1781 | if (!hdev->vector_status) { |
1782 | pci_free_irq_vectors(pdev); | |
46a3df9f | 1783 | return -ENOMEM; |
887c3820 | 1784 | } |
46a3df9f S |
1785 | |
1786 | for (i = 0; i < hdev->num_msi; i++) | |
1787 | hdev->vector_status[i] = HCLGE_INVALID_VPORT; | |
1788 | ||
887c3820 SM |
1789 | hdev->vector_irq = devm_kcalloc(&pdev->dev, hdev->num_msi, |
1790 | sizeof(int), GFP_KERNEL); | |
1791 | if (!hdev->vector_irq) { | |
1792 | pci_free_irq_vectors(pdev); | |
1793 | return -ENOMEM; | |
46a3df9f | 1794 | } |
46a3df9f S |
1795 | |
1796 | return 0; | |
1797 | } | |
1798 | ||
1c780066 | 1799 | static u8 hclge_check_speed_dup(u8 duplex, int speed) |
46a3df9f | 1800 | { |
46a3df9f | 1801 | |
1c780066 YL |
1802 | if (!(speed == HCLGE_MAC_SPEED_10M || speed == HCLGE_MAC_SPEED_100M)) |
1803 | duplex = HCLGE_MAC_FULL; | |
46a3df9f | 1804 | |
1c780066 | 1805 | return duplex; |
46a3df9f S |
1806 | } |
1807 | ||
1c780066 YL |
1808 | static int hclge_cfg_mac_speed_dup_hw(struct hclge_dev *hdev, int speed, |
1809 | u8 duplex) | |
46a3df9f | 1810 | { |
d44f9b63 | 1811 | struct hclge_config_mac_speed_dup_cmd *req; |
46a3df9f S |
1812 | struct hclge_desc desc; |
1813 | int ret; | |
1814 | ||
d44f9b63 | 1815 | req = (struct hclge_config_mac_speed_dup_cmd *)desc.data; |
46a3df9f S |
1816 | |
1817 | hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_SPEED_DUP, false); | |
1818 | ||
ccc23ef3 | 1819 | hnae3_set_bit(req->speed_dup, HCLGE_CFG_DUPLEX_B, !!duplex); |
46a3df9f S |
1820 | |
1821 | switch (speed) { | |
1822 | case HCLGE_MAC_SPEED_10M: | |
ccc23ef3 PL |
1823 | hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M, |
1824 | HCLGE_CFG_SPEED_S, 6); | |
46a3df9f S |
1825 | break; |
1826 | case HCLGE_MAC_SPEED_100M: | |
ccc23ef3 PL |
1827 | hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M, |
1828 | HCLGE_CFG_SPEED_S, 7); | |
46a3df9f S |
1829 | break; |
1830 | case HCLGE_MAC_SPEED_1G: | |
ccc23ef3 PL |
1831 | hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M, |
1832 | HCLGE_CFG_SPEED_S, 0); | |
46a3df9f S |
1833 | break; |
1834 | case HCLGE_MAC_SPEED_10G: | |
ccc23ef3 PL |
1835 | hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M, |
1836 | HCLGE_CFG_SPEED_S, 1); | |
46a3df9f S |
1837 | break; |
1838 | case HCLGE_MAC_SPEED_25G: | |
ccc23ef3 PL |
1839 | hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M, |
1840 | HCLGE_CFG_SPEED_S, 2); | |
46a3df9f S |
1841 | break; |
1842 | case HCLGE_MAC_SPEED_40G: | |
ccc23ef3 PL |
1843 | hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M, |
1844 | HCLGE_CFG_SPEED_S, 3); | |
46a3df9f S |
1845 | break; |
1846 | case HCLGE_MAC_SPEED_50G: | |
ccc23ef3 PL |
1847 | hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M, |
1848 | HCLGE_CFG_SPEED_S, 4); | |
46a3df9f S |
1849 | break; |
1850 | case HCLGE_MAC_SPEED_100G: | |
ccc23ef3 PL |
1851 | hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M, |
1852 | HCLGE_CFG_SPEED_S, 5); | |
46a3df9f S |
1853 | break; |
1854 | default: | |
d7629e74 | 1855 | dev_err(&hdev->pdev->dev, "invalid speed (%d)\n", speed); |
46a3df9f S |
1856 | return -EINVAL; |
1857 | } | |
1858 | ||
ccc23ef3 PL |
1859 | hnae3_set_bit(req->mac_change_fec_en, HCLGE_CFG_MAC_SPEED_CHANGE_EN_B, |
1860 | 1); | |
46a3df9f S |
1861 | |
1862 | ret = hclge_cmd_send(&hdev->hw, &desc, 1); | |
1863 | if (ret) { | |
1864 | dev_err(&hdev->pdev->dev, | |
1865 | "mac speed/duplex config cmd failed %d.\n", ret); | |
1866 | return ret; | |
1867 | } | |
1868 | ||
1c780066 YL |
1869 | return 0; |
1870 | } | |
1871 | ||
1872 | int hclge_cfg_mac_speed_dup(struct hclge_dev *hdev, int speed, u8 duplex) | |
1873 | { | |
1874 | int ret; | |
1875 | ||
1876 | duplex = hclge_check_speed_dup(duplex, speed); | |
1877 | if (hdev->hw.mac.speed == speed && hdev->hw.mac.duplex == duplex) | |
1878 | return 0; | |
1879 | ||
1880 | ret = hclge_cfg_mac_speed_dup_hw(hdev, speed, duplex); | |
1881 | if (ret) | |
1882 | return ret; | |
1883 | ||
1884 | hdev->hw.mac.speed = speed; | |
1885 | hdev->hw.mac.duplex = duplex; | |
46a3df9f S |
1886 | |
1887 | return 0; | |
1888 | } | |
1889 | ||
1890 | static int hclge_cfg_mac_speed_dup_h(struct hnae3_handle *handle, int speed, | |
1891 | u8 duplex) | |
1892 | { | |
1893 | struct hclge_vport *vport = hclge_get_vport(handle); | |
1894 | struct hclge_dev *hdev = vport->back; | |
1895 | ||
1896 | return hclge_cfg_mac_speed_dup(hdev, speed, duplex); | |
1897 | } | |
1898 | ||
1899 | static int hclge_query_mac_an_speed_dup(struct hclge_dev *hdev, int *speed, | |
1900 | u8 *duplex) | |
1901 | { | |
d44f9b63 | 1902 | struct hclge_query_an_speed_dup_cmd *req; |
46a3df9f S |
1903 | struct hclge_desc desc; |
1904 | int speed_tmp; | |
1905 | int ret; | |
1906 | ||
d44f9b63 | 1907 | req = (struct hclge_query_an_speed_dup_cmd *)desc.data; |
46a3df9f S |
1908 | |
1909 | hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_AN_RESULT, true); | |
1910 | ret = hclge_cmd_send(&hdev->hw, &desc, 1); | |
1911 | if (ret) { | |
1912 | dev_err(&hdev->pdev->dev, | |
1913 | "mac speed/autoneg/duplex query cmd failed %d\n", | |
1914 | ret); | |
1915 | return ret; | |
1916 | } | |
1917 | ||
ccc23ef3 PL |
1918 | *duplex = hnae3_get_bit(req->an_syn_dup_speed, HCLGE_QUERY_DUPLEX_B); |
1919 | speed_tmp = hnae3_get_field(req->an_syn_dup_speed, HCLGE_QUERY_SPEED_M, | |
1920 | HCLGE_QUERY_SPEED_S); | |
46a3df9f S |
1921 | |
1922 | ret = hclge_parse_speed(speed_tmp, speed); | |
90415e85 | 1923 | if (ret) |
46a3df9f S |
1924 | dev_err(&hdev->pdev->dev, |
1925 | "could not parse speed(=%d), %d\n", speed_tmp, ret); | |
46a3df9f | 1926 | |
90415e85 | 1927 | return ret; |
46a3df9f S |
1928 | } |
1929 | ||
46a3df9f S |
1930 | static int hclge_set_autoneg_en(struct hclge_dev *hdev, bool enable) |
1931 | { | |
d44f9b63 | 1932 | struct hclge_config_auto_neg_cmd *req; |
46a3df9f | 1933 | struct hclge_desc desc; |
a90bb9a5 | 1934 | u32 flag = 0; |
46a3df9f S |
1935 | int ret; |
1936 | ||
1937 | hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_AN_MODE, false); | |
1938 | ||
d44f9b63 | 1939 | req = (struct hclge_config_auto_neg_cmd *)desc.data; |
ccc23ef3 | 1940 | hnae3_set_bit(flag, HCLGE_MAC_CFG_AN_EN_B, !!enable); |
a90bb9a5 | 1941 | req->cfg_an_cmd_flag = cpu_to_le32(flag); |
46a3df9f S |
1942 | |
1943 | ret = hclge_cmd_send(&hdev->hw, &desc, 1); | |
90415e85 | 1944 | if (ret) |
46a3df9f S |
1945 | dev_err(&hdev->pdev->dev, "auto neg set cmd failed %d.\n", |
1946 | ret); | |
46a3df9f | 1947 | |
90415e85 | 1948 | return ret; |
46a3df9f S |
1949 | } |
1950 | ||
1951 | static int hclge_set_autoneg(struct hnae3_handle *handle, bool enable) | |
1952 | { | |
1953 | struct hclge_vport *vport = hclge_get_vport(handle); | |
1954 | struct hclge_dev *hdev = vport->back; | |
1955 | ||
1956 | return hclge_set_autoneg_en(hdev, enable); | |
1957 | } | |
1958 | ||
1959 | static int hclge_get_autoneg(struct hnae3_handle *handle) | |
1960 | { | |
1961 | struct hclge_vport *vport = hclge_get_vport(handle); | |
1962 | struct hclge_dev *hdev = vport->back; | |
9ff804ee FL |
1963 | struct phy_device *phydev = hdev->hw.mac.phydev; |
1964 | ||
1965 | if (phydev) | |
1966 | return phydev->autoneg; | |
46a3df9f S |
1967 | |
1968 | return hdev->hw.mac.autoneg; | |
1969 | } | |
1970 | ||
1971 | static int hclge_mac_init(struct hclge_dev *hdev) | |
1972 | { | |
1973 | struct hclge_mac *mac = &hdev->hw.mac; | |
1974 | int ret; | |
1975 | ||
1c780066 YL |
1976 | hdev->hw.mac.duplex = HCLGE_MAC_FULL; |
1977 | ret = hclge_cfg_mac_speed_dup_hw(hdev, hdev->hw.mac.speed, | |
1978 | hdev->hw.mac.duplex); | |
46a3df9f S |
1979 | if (ret) { |
1980 | dev_err(&hdev->pdev->dev, | |
1981 | "Config mac speed dup fail ret=%d\n", ret); | |
1982 | return ret; | |
1983 | } | |
1984 | ||
1985 | mac->link = 0; | |
1986 | ||
4ee09281 YL |
1987 | ret = hclge_set_mac_mtu(hdev, hdev->mps); |
1988 | if (ret) { | |
1989 | dev_err(&hdev->pdev->dev, "set mtu failed ret=%d\n", ret); | |
1990 | return ret; | |
1991 | } | |
59bc85ec | 1992 | |
4ee09281 | 1993 | ret = hclge_buffer_alloc(hdev); |
90415e85 | 1994 | if (ret) |
59bc85ec | 1995 | dev_err(&hdev->pdev->dev, |
4ee09281 | 1996 | "allocate buffer fail, ret=%d\n", ret); |
59bc85ec | 1997 | |
90415e85 | 1998 | return ret; |
46a3df9f S |
1999 | } |
2000 | ||
22fd3468 SM |
2001 | static void hclge_mbx_task_schedule(struct hclge_dev *hdev) |
2002 | { | |
2003 | if (!test_and_set_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state)) | |
2004 | schedule_work(&hdev->mbx_service_task); | |
2005 | } | |
2006 | ||
ed4a1bb8 SM |
2007 | static void hclge_reset_task_schedule(struct hclge_dev *hdev) |
2008 | { | |
2009 | if (!test_and_set_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state)) | |
2010 | schedule_work(&hdev->rst_service_task); | |
2011 | } | |
2012 | ||
46a3df9f S |
2013 | static void hclge_task_schedule(struct hclge_dev *hdev) |
2014 | { | |
2015 | if (!test_bit(HCLGE_STATE_DOWN, &hdev->state) && | |
2016 | !test_bit(HCLGE_STATE_REMOVING, &hdev->state) && | |
2017 | !test_and_set_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state)) | |
2018 | (void)schedule_work(&hdev->service_task); | |
2019 | } | |
2020 | ||
2021 | static int hclge_get_mac_link_status(struct hclge_dev *hdev) | |
2022 | { | |
d44f9b63 | 2023 | struct hclge_link_status_cmd *req; |
46a3df9f S |
2024 | struct hclge_desc desc; |
2025 | int link_status; | |
2026 | int ret; | |
2027 | ||
2028 | hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_LINK_STATUS, true); | |
2029 | ret = hclge_cmd_send(&hdev->hw, &desc, 1); | |
2030 | if (ret) { | |
2031 | dev_err(&hdev->pdev->dev, "get link status cmd failed %d\n", | |
2032 | ret); | |
2033 | return ret; | |
2034 | } | |
2035 | ||
d44f9b63 | 2036 | req = (struct hclge_link_status_cmd *)desc.data; |
e23e21ea | 2037 | link_status = req->status & HCLGE_LINK_STATUS_UP_M; |
46a3df9f S |
2038 | |
2039 | return !!link_status; | |
2040 | } | |
2041 | ||
2042 | static int hclge_get_mac_phy_link(struct hclge_dev *hdev) | |
2043 | { | |
2044 | int mac_state; | |
2045 | int link_stat; | |
2046 | ||
ed6acb33 PL |
2047 | if (test_bit(HCLGE_STATE_DOWN, &hdev->state)) |
2048 | return 0; | |
2049 | ||
46a3df9f S |
2050 | mac_state = hclge_get_mac_link_status(hdev); |
2051 | ||
2052 | if (hdev->hw.mac.phydev) { | |
7ce8e698 | 2053 | if (hdev->hw.mac.phydev->state == PHY_RUNNING) |
46a3df9f S |
2054 | link_stat = mac_state & |
2055 | hdev->hw.mac.phydev->link; | |
2056 | else | |
2057 | link_stat = 0; | |
2058 | ||
2059 | } else { | |
2060 | link_stat = mac_state; | |
2061 | } | |
2062 | ||
2063 | return !!link_stat; | |
2064 | } | |
2065 | ||
2066 | static void hclge_update_link_status(struct hclge_dev *hdev) | |
2067 | { | |
2068 | struct hnae3_client *client = hdev->nic_client; | |
2069 | struct hnae3_handle *handle; | |
2070 | int state; | |
2071 | int i; | |
2072 | ||
2073 | if (!client) | |
2074 | return; | |
2075 | state = hclge_get_mac_phy_link(hdev); | |
2076 | if (state != hdev->hw.mac.link) { | |
2077 | for (i = 0; i < hdev->num_vmdq_vport + 1; i++) { | |
2078 | handle = &hdev->vport[i].nic; | |
2079 | client->ops->link_status_change(handle, state); | |
2080 | } | |
2081 | hdev->hw.mac.link = state; | |
2082 | } | |
2083 | } | |
2084 | ||
2085 | static int hclge_update_speed_duplex(struct hclge_dev *hdev) | |
2086 | { | |
2087 | struct hclge_mac mac = hdev->hw.mac; | |
2088 | u8 duplex; | |
2089 | int speed; | |
2090 | int ret; | |
2091 | ||
2092 | /* get the speed and duplex as autoneg'result from mac cmd when phy | |
2093 | * doesn't exit. | |
2094 | */ | |
c040366b | 2095 | if (mac.phydev || !mac.autoneg) |
46a3df9f S |
2096 | return 0; |
2097 | ||
2098 | ret = hclge_query_mac_an_speed_dup(hdev, &speed, &duplex); | |
2099 | if (ret) { | |
2100 | dev_err(&hdev->pdev->dev, | |
2101 | "mac autoneg/speed/duplex query failed %d\n", ret); | |
2102 | return ret; | |
2103 | } | |
2104 | ||
1c780066 YL |
2105 | ret = hclge_cfg_mac_speed_dup(hdev, speed, duplex); |
2106 | if (ret) { | |
2107 | dev_err(&hdev->pdev->dev, | |
2108 | "mac speed/duplex config failed %d\n", ret); | |
2109 | return ret; | |
46a3df9f S |
2110 | } |
2111 | ||
2112 | return 0; | |
2113 | } | |
2114 | ||
2115 | static int hclge_update_speed_duplex_h(struct hnae3_handle *handle) | |
2116 | { | |
2117 | struct hclge_vport *vport = hclge_get_vport(handle); | |
2118 | struct hclge_dev *hdev = vport->back; | |
2119 | ||
2120 | return hclge_update_speed_duplex(hdev); | |
2121 | } | |
2122 | ||
2123 | static int hclge_get_status(struct hnae3_handle *handle) | |
2124 | { | |
2125 | struct hclge_vport *vport = hclge_get_vport(handle); | |
2126 | struct hclge_dev *hdev = vport->back; | |
2127 | ||
2128 | hclge_update_link_status(hdev); | |
2129 | ||
2130 | return hdev->hw.mac.link; | |
2131 | } | |
2132 | ||
d039ef68 | 2133 | static void hclge_service_timer(struct timer_list *t) |
46a3df9f | 2134 | { |
d039ef68 | 2135 | struct hclge_dev *hdev = from_timer(hdev, t, service_timer); |
46a3df9f | 2136 | |
d039ef68 | 2137 | mod_timer(&hdev->service_timer, jiffies + HZ); |
7a5d2a39 | 2138 | hdev->hw_stats.stats_timer++; |
46a3df9f S |
2139 | hclge_task_schedule(hdev); |
2140 | } | |
2141 | ||
2142 | static void hclge_service_complete(struct hclge_dev *hdev) | |
2143 | { | |
2144 | WARN_ON(!test_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state)); | |
2145 | ||
2146 | /* Flush memory before next watchdog */ | |
2147 | smp_mb__before_atomic(); | |
2148 | clear_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state); | |
2149 | } | |
2150 | ||
202f2014 SM |
2151 | static u32 hclge_check_event_cause(struct hclge_dev *hdev, u32 *clearval) |
2152 | { | |
2153 | u32 rst_src_reg; | |
22fd3468 | 2154 | u32 cmdq_src_reg; |
202f2014 SM |
2155 | |
2156 | /* fetch the events from their corresponding regs */ | |
0bcc9ba1 | 2157 | rst_src_reg = hclge_read_dev(&hdev->hw, HCLGE_MISC_VECTOR_INT_STS); |
22fd3468 SM |
2158 | cmdq_src_reg = hclge_read_dev(&hdev->hw, HCLGE_VECTOR0_CMDQ_SRC_REG); |
2159 | ||
2160 | /* Assumption: If by any chance reset and mailbox events are reported | |
2161 | * together then we will only process reset event in this go and will | |
2162 | * defer the processing of the mailbox events. Since, we would have not | |
2163 | * cleared RX CMDQ event this time we would receive again another | |
2164 | * interrupt from H/W just for the mailbox. | |
2165 | */ | |
202f2014 SM |
2166 | |
2167 | /* check for vector0 reset event sources */ | |
de2eae69 HT |
2168 | if (BIT(HCLGE_VECTOR0_IMPRESET_INT_B) & rst_src_reg) { |
2169 | dev_info(&hdev->pdev->dev, "IMP reset interrupt\n"); | |
2170 | set_bit(HNAE3_IMP_RESET, &hdev->reset_pending); | |
2171 | set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state); | |
2172 | *clearval = BIT(HCLGE_VECTOR0_IMPRESET_INT_B); | |
2173 | return HCLGE_VECTOR0_EVENT_RST; | |
2174 | } | |
2175 | ||
202f2014 | 2176 | if (BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B) & rst_src_reg) { |
1afdb53a | 2177 | dev_info(&hdev->pdev->dev, "global reset interrupt\n"); |
7edef4ce | 2178 | set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state); |
202f2014 SM |
2179 | set_bit(HNAE3_GLOBAL_RESET, &hdev->reset_pending); |
2180 | *clearval = BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B); | |
2181 | return HCLGE_VECTOR0_EVENT_RST; | |
2182 | } | |
2183 | ||
2184 | if (BIT(HCLGE_VECTOR0_CORERESET_INT_B) & rst_src_reg) { | |
1afdb53a | 2185 | dev_info(&hdev->pdev->dev, "core reset interrupt\n"); |
7edef4ce | 2186 | set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state); |
202f2014 SM |
2187 | set_bit(HNAE3_CORE_RESET, &hdev->reset_pending); |
2188 | *clearval = BIT(HCLGE_VECTOR0_CORERESET_INT_B); | |
2189 | return HCLGE_VECTOR0_EVENT_RST; | |
2190 | } | |
2191 | ||
22fd3468 SM |
2192 | /* check for vector0 mailbox(=CMDQ RX) event source */ |
2193 | if (BIT(HCLGE_VECTOR0_RX_CMDQ_INT_B) & cmdq_src_reg) { | |
2194 | cmdq_src_reg &= ~BIT(HCLGE_VECTOR0_RX_CMDQ_INT_B); | |
2195 | *clearval = cmdq_src_reg; | |
2196 | return HCLGE_VECTOR0_EVENT_MBX; | |
2197 | } | |
202f2014 SM |
2198 | |
2199 | return HCLGE_VECTOR0_EVENT_OTHER; | |
2200 | } | |
2201 | ||
2202 | static void hclge_clear_event_cause(struct hclge_dev *hdev, u32 event_type, | |
2203 | u32 regclr) | |
2204 | { | |
22fd3468 SM |
2205 | switch (event_type) { |
2206 | case HCLGE_VECTOR0_EVENT_RST: | |
202f2014 | 2207 | hclge_write_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG, regclr); |
22fd3468 SM |
2208 | break; |
2209 | case HCLGE_VECTOR0_EVENT_MBX: | |
2210 | hclge_write_dev(&hdev->hw, HCLGE_VECTOR0_CMDQ_SRC_REG, regclr); | |
2211 | break; | |
085920ba JS |
2212 | default: |
2213 | break; | |
22fd3468 | 2214 | } |
202f2014 SM |
2215 | } |
2216 | ||
9ab4ad14 XW |
2217 | static void hclge_clear_all_event_cause(struct hclge_dev *hdev) |
2218 | { | |
2219 | hclge_clear_event_cause(hdev, HCLGE_VECTOR0_EVENT_RST, | |
2220 | BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B) | | |
2221 | BIT(HCLGE_VECTOR0_CORERESET_INT_B) | | |
2222 | BIT(HCLGE_VECTOR0_IMPRESET_INT_B)); | |
2223 | hclge_clear_event_cause(hdev, HCLGE_VECTOR0_EVENT_MBX, 0); | |
2224 | } | |
2225 | ||
466b0c00 L |
2226 | static void hclge_enable_vector(struct hclge_misc_vector *vector, bool enable) |
2227 | { | |
2228 | writel(enable ? 1 : 0, vector->addr); | |
2229 | } | |
2230 | ||
2231 | static irqreturn_t hclge_misc_irq_handle(int irq, void *data) | |
2232 | { | |
2233 | struct hclge_dev *hdev = data; | |
202f2014 SM |
2234 | u32 event_cause; |
2235 | u32 clearval; | |
466b0c00 L |
2236 | |
2237 | hclge_enable_vector(&hdev->misc_vector, false); | |
202f2014 SM |
2238 | event_cause = hclge_check_event_cause(hdev, &clearval); |
2239 | ||
22fd3468 | 2240 | /* vector 0 interrupt is shared with reset and mailbox source events.*/ |
202f2014 SM |
2241 | switch (event_cause) { |
2242 | case HCLGE_VECTOR0_EVENT_RST: | |
ed4a1bb8 | 2243 | hclge_reset_task_schedule(hdev); |
202f2014 | 2244 | break; |
22fd3468 SM |
2245 | case HCLGE_VECTOR0_EVENT_MBX: |
2246 | /* If we are here then, | |
2247 | * 1. Either we are not handling any mbx task and we are not | |
2248 | * scheduled as well | |
2249 | * OR | |
2250 | * 2. We could be handling a mbx task but nothing more is | |
2251 | * scheduled. | |
2252 | * In both cases, we should schedule mbx task as there are more | |
2253 | * mbx messages reported by this interrupt. | |
2254 | */ | |
2255 | hclge_mbx_task_schedule(hdev); | |
40ee4b71 | 2256 | break; |
202f2014 | 2257 | default: |
40ee4b71 YL |
2258 | dev_warn(&hdev->pdev->dev, |
2259 | "received unknown or unhandled event of vector0\n"); | |
202f2014 SM |
2260 | break; |
2261 | } | |
2262 | ||
e9a50d09 | 2263 | /* clear the source of interrupt if it is not cause by reset */ |
c9fc48dc | 2264 | if (event_cause == HCLGE_VECTOR0_EVENT_MBX) { |
e9a50d09 YL |
2265 | hclge_clear_event_cause(hdev, event_cause, clearval); |
2266 | hclge_enable_vector(&hdev->misc_vector, true); | |
2267 | } | |
466b0c00 L |
2268 | |
2269 | return IRQ_HANDLED; | |
2270 | } | |
2271 | ||
2272 | static void hclge_free_vector(struct hclge_dev *hdev, int vector_id) | |
2273 | { | |
1dc5378f PL |
2274 | if (hdev->vector_status[vector_id] == HCLGE_INVALID_VPORT) { |
2275 | dev_warn(&hdev->pdev->dev, | |
2276 | "vector(vector_id %d) has been freed.\n", vector_id); | |
2277 | return; | |
2278 | } | |
2279 | ||
466b0c00 L |
2280 | hdev->vector_status[vector_id] = HCLGE_INVALID_VPORT; |
2281 | hdev->num_msi_left += 1; | |
2282 | hdev->num_msi_used -= 1; | |
2283 | } | |
2284 | ||
2285 | static void hclge_get_misc_vector(struct hclge_dev *hdev) | |
2286 | { | |
2287 | struct hclge_misc_vector *vector = &hdev->misc_vector; | |
2288 | ||
2289 | vector->vector_irq = pci_irq_vector(hdev->pdev, 0); | |
2290 | ||
2291 | vector->addr = hdev->hw.io_base + HCLGE_MISC_VECTOR_REG_BASE; | |
2292 | hdev->vector_status[0] = 0; | |
2293 | ||
2294 | hdev->num_msi_left -= 1; | |
2295 | hdev->num_msi_used += 1; | |
2296 | } | |
2297 | ||
2298 | static int hclge_misc_irq_init(struct hclge_dev *hdev) | |
2299 | { | |
2300 | int ret; | |
2301 | ||
2302 | hclge_get_misc_vector(hdev); | |
2303 | ||
202f2014 SM |
2304 | /* this would be explicitly freed in the end */ |
2305 | ret = request_irq(hdev->misc_vector.vector_irq, hclge_misc_irq_handle, | |
2306 | 0, "hclge_misc", hdev); | |
466b0c00 L |
2307 | if (ret) { |
2308 | hclge_free_vector(hdev, 0); | |
2309 | dev_err(&hdev->pdev->dev, "request misc irq(%d) fail\n", | |
2310 | hdev->misc_vector.vector_irq); | |
2311 | } | |
2312 | ||
2313 | return ret; | |
2314 | } | |
2315 | ||
202f2014 SM |
2316 | static void hclge_misc_irq_uninit(struct hclge_dev *hdev) |
2317 | { | |
2318 | free_irq(hdev->misc_vector.vector_irq, hdev); | |
2319 | hclge_free_vector(hdev, 0); | |
2320 | } | |
2321 | ||
4ed340ab L |
2322 | static int hclge_notify_client(struct hclge_dev *hdev, |
2323 | enum hnae3_reset_notify_type type) | |
2324 | { | |
2325 | struct hnae3_client *client = hdev->nic_client; | |
2326 | u16 i; | |
2327 | ||
2328 | if (!client->ops->reset_notify) | |
2329 | return -EOPNOTSUPP; | |
2330 | ||
2331 | for (i = 0; i < hdev->num_vmdq_vport + 1; i++) { | |
ad7c82fe | 2332 | struct hnae3_handle *handle = &hdev->vport[i].nic; |
2333 | int ret; | |
b38db544 | 2334 | |
4ed340ab | 2335 | ret = client->ops->reset_notify(handle, type); |
1afdb53a HT |
2336 | if (ret) { |
2337 | dev_err(&hdev->pdev->dev, | |
2338 | "notify nic client failed %d(%d)\n", type, ret); | |
4ed340ab | 2339 | return ret; |
1afdb53a | 2340 | } |
4ed340ab L |
2341 | } |
2342 | ||
6060dc84 | 2343 | return 0; |
4ed340ab L |
2344 | } |
2345 | ||
3db6b633 HT |
2346 | static int hclge_notify_roce_client(struct hclge_dev *hdev, |
2347 | enum hnae3_reset_notify_type type) | |
2348 | { | |
2349 | struct hnae3_client *client = hdev->roce_client; | |
2350 | int ret = 0; | |
2351 | u16 i; | |
2352 | ||
2353 | if (!client) | |
2354 | return 0; | |
2355 | ||
2356 | if (!client->ops->reset_notify) | |
2357 | return -EOPNOTSUPP; | |
2358 | ||
2359 | for (i = 0; i < hdev->num_vmdq_vport + 1; i++) { | |
2360 | struct hnae3_handle *handle = &hdev->vport[i].roce; | |
2361 | ||
2362 | ret = client->ops->reset_notify(handle, type); | |
2363 | if (ret) { | |
2364 | dev_err(&hdev->pdev->dev, | |
2365 | "notify roce client failed %d(%d)", | |
2366 | type, ret); | |
2367 | return ret; | |
2368 | } | |
2369 | } | |
2370 | ||
2371 | return ret; | |
2372 | } | |
2373 | ||
4ed340ab L |
2374 | static int hclge_reset_wait(struct hclge_dev *hdev) |
2375 | { | |
2376 | #define HCLGE_RESET_WATI_MS 100 | |
de2eae69 | 2377 | #define HCLGE_RESET_WAIT_CNT 200 |
4ed340ab L |
2378 | u32 val, reg, reg_bit; |
2379 | u32 cnt = 0; | |
2380 | ||
2381 | switch (hdev->reset_type) { | |
de2eae69 HT |
2382 | case HNAE3_IMP_RESET: |
2383 | reg = HCLGE_GLOBAL_RESET_REG; | |
2384 | reg_bit = HCLGE_IMP_RESET_BIT; | |
2385 | break; | |
4ed340ab L |
2386 | case HNAE3_GLOBAL_RESET: |
2387 | reg = HCLGE_GLOBAL_RESET_REG; | |
2388 | reg_bit = HCLGE_GLOBAL_RESET_BIT; | |
2389 | break; | |
2390 | case HNAE3_CORE_RESET: | |
2391 | reg = HCLGE_GLOBAL_RESET_REG; | |
2392 | reg_bit = HCLGE_CORE_RESET_BIT; | |
2393 | break; | |
2394 | case HNAE3_FUNC_RESET: | |
2395 | reg = HCLGE_FUN_RST_ING; | |
2396 | reg_bit = HCLGE_FUN_RST_ING_B; | |
2397 | break; | |
26977990 HT |
2398 | case HNAE3_FLR_RESET: |
2399 | break; | |
4ed340ab L |
2400 | default: |
2401 | dev_err(&hdev->pdev->dev, | |
2402 | "Wait for unsupported reset type: %d\n", | |
2403 | hdev->reset_type); | |
2404 | return -EINVAL; | |
2405 | } | |
2406 | ||
26977990 HT |
2407 | if (hdev->reset_type == HNAE3_FLR_RESET) { |
2408 | while (!test_bit(HNAE3_FLR_DONE, &hdev->flr_state) && | |
2409 | cnt++ < HCLGE_RESET_WAIT_CNT) | |
2410 | msleep(HCLGE_RESET_WATI_MS); | |
2411 | ||
2412 | if (!test_bit(HNAE3_FLR_DONE, &hdev->flr_state)) { | |
2413 | dev_err(&hdev->pdev->dev, | |
2414 | "flr wait timeout: %d\n", cnt); | |
2415 | return -EBUSY; | |
2416 | } | |
2417 | ||
2418 | return 0; | |
2419 | } | |
2420 | ||
4ed340ab | 2421 | val = hclge_read_dev(&hdev->hw, reg); |
ccc23ef3 | 2422 | while (hnae3_get_bit(val, reg_bit) && cnt < HCLGE_RESET_WAIT_CNT) { |
4ed340ab L |
2423 | msleep(HCLGE_RESET_WATI_MS); |
2424 | val = hclge_read_dev(&hdev->hw, reg); | |
2425 | cnt++; | |
2426 | } | |
2427 | ||
4ed340ab L |
2428 | if (cnt >= HCLGE_RESET_WAIT_CNT) { |
2429 | dev_warn(&hdev->pdev->dev, | |
2430 | "Wait for reset timeout: %d\n", hdev->reset_type); | |
2431 | return -EBUSY; | |
2432 | } | |
2433 | ||
2434 | return 0; | |
2435 | } | |
2436 | ||
7885e906 HT |
2437 | static int hclge_set_vf_rst(struct hclge_dev *hdev, int func_id, bool reset) |
2438 | { | |
2439 | struct hclge_vf_rst_cmd *req; | |
2440 | struct hclge_desc desc; | |
2441 | ||
2442 | req = (struct hclge_vf_rst_cmd *)desc.data; | |
2443 | hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GBL_RST_STATUS, false); | |
2444 | req->dest_vfid = func_id; | |
2445 | ||
2446 | if (reset) | |
2447 | req->vf_rst = 0x1; | |
2448 | ||
2449 | return hclge_cmd_send(&hdev->hw, &desc, 1); | |
2450 | } | |
2451 | ||
2452 | int hclge_set_all_vf_rst(struct hclge_dev *hdev, bool reset) | |
2453 | { | |
2454 | int i; | |
2455 | ||
2456 | for (i = hdev->num_vmdq_vport + 1; i < hdev->num_alloc_vport; i++) { | |
2457 | struct hclge_vport *vport = &hdev->vport[i]; | |
2458 | int ret; | |
2459 | ||
2460 | /* Send cmd to set/clear VF's FUNC_RST_ING */ | |
2461 | ret = hclge_set_vf_rst(hdev, vport->vport_id, reset); | |
2462 | if (ret) { | |
2463 | dev_err(&hdev->pdev->dev, | |
5f9c2a66 | 2464 | "set vf(%d) rst failed %d!\n", |
7885e906 HT |
2465 | vport->vport_id, ret); |
2466 | return ret; | |
2467 | } | |
2468 | ||
2469 | if (!reset) | |
2470 | continue; | |
2471 | ||
2472 | /* Inform VF to process the reset. | |
2473 | * hclge_inform_reset_assert_to_vf may fail if VF | |
2474 | * driver is not loaded. | |
2475 | */ | |
2476 | ret = hclge_inform_reset_assert_to_vf(vport); | |
2477 | if (ret) | |
2478 | dev_warn(&hdev->pdev->dev, | |
5f9c2a66 | 2479 | "inform reset to vf(%d) failed %d!\n", |
7885e906 HT |
2480 | vport->vport_id, ret); |
2481 | } | |
2482 | ||
2483 | return 0; | |
2484 | } | |
2485 | ||
13a86fae | 2486 | int hclge_func_reset_cmd(struct hclge_dev *hdev, int func_id) |
4ed340ab L |
2487 | { |
2488 | struct hclge_desc desc; | |
2489 | struct hclge_reset_cmd *req = (struct hclge_reset_cmd *)desc.data; | |
2490 | int ret; | |
2491 | ||
2492 | hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_RST_TRIGGER, false); | |
ccc23ef3 | 2493 | hnae3_set_bit(req->mac_func_reset, HCLGE_CFG_RESET_FUNC_B, 1); |
4ed340ab L |
2494 | req->fun_reset_vfid = func_id; |
2495 | ||
2496 | ret = hclge_cmd_send(&hdev->hw, &desc, 1); | |
2497 | if (ret) | |
2498 | dev_err(&hdev->pdev->dev, | |
2499 | "send function reset cmd fail, status =%d\n", ret); | |
2500 | ||
2501 | return ret; | |
2502 | } | |
2503 | ||
d5752031 | 2504 | static void hclge_do_reset(struct hclge_dev *hdev) |
4ed340ab L |
2505 | { |
2506 | struct pci_dev *pdev = hdev->pdev; | |
2507 | u32 val; | |
2508 | ||
d5752031 | 2509 | switch (hdev->reset_type) { |
4ed340ab L |
2510 | case HNAE3_GLOBAL_RESET: |
2511 | val = hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG); | |
ccc23ef3 | 2512 | hnae3_set_bit(val, HCLGE_GLOBAL_RESET_BIT, 1); |
4ed340ab L |
2513 | hclge_write_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG, val); |
2514 | dev_info(&pdev->dev, "Global Reset requested\n"); | |
2515 | break; | |
2516 | case HNAE3_CORE_RESET: | |
2517 | val = hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG); | |
ccc23ef3 | 2518 | hnae3_set_bit(val, HCLGE_CORE_RESET_BIT, 1); |
4ed340ab L |
2519 | hclge_write_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG, val); |
2520 | dev_info(&pdev->dev, "Core Reset requested\n"); | |
2521 | break; | |
2522 | case HNAE3_FUNC_RESET: | |
2523 | dev_info(&pdev->dev, "PF Reset requested\n"); | |
ed4a1bb8 SM |
2524 | /* schedule again to check later */ |
2525 | set_bit(HNAE3_FUNC_RESET, &hdev->reset_pending); | |
2526 | hclge_reset_task_schedule(hdev); | |
4ed340ab | 2527 | break; |
26977990 HT |
2528 | case HNAE3_FLR_RESET: |
2529 | dev_info(&pdev->dev, "FLR requested\n"); | |
2530 | /* schedule again to check later */ | |
2531 | set_bit(HNAE3_FLR_RESET, &hdev->reset_pending); | |
2532 | hclge_reset_task_schedule(hdev); | |
2533 | break; | |
4ed340ab L |
2534 | default: |
2535 | dev_warn(&pdev->dev, | |
d5752031 | 2536 | "Unsupported reset type: %d\n", hdev->reset_type); |
4ed340ab L |
2537 | break; |
2538 | } | |
2539 | } | |
2540 | ||
d5752031 SM |
2541 | static enum hnae3_reset_type hclge_get_reset_level(struct hclge_dev *hdev, |
2542 | unsigned long *addr) | |
2543 | { | |
2544 | enum hnae3_reset_type rst_level = HNAE3_NONE_RESET; | |
2545 | ||
2546 | /* return the highest priority reset level amongst all */ | |
62aff578 HT |
2547 | if (test_bit(HNAE3_IMP_RESET, addr)) { |
2548 | rst_level = HNAE3_IMP_RESET; | |
2549 | clear_bit(HNAE3_IMP_RESET, addr); | |
2550 | clear_bit(HNAE3_GLOBAL_RESET, addr); | |
2551 | clear_bit(HNAE3_CORE_RESET, addr); | |
2552 | clear_bit(HNAE3_FUNC_RESET, addr); | |
2553 | } else if (test_bit(HNAE3_GLOBAL_RESET, addr)) { | |
d5752031 | 2554 | rst_level = HNAE3_GLOBAL_RESET; |
62aff578 HT |
2555 | clear_bit(HNAE3_GLOBAL_RESET, addr); |
2556 | clear_bit(HNAE3_CORE_RESET, addr); | |
2557 | clear_bit(HNAE3_FUNC_RESET, addr); | |
2558 | } else if (test_bit(HNAE3_CORE_RESET, addr)) { | |
d5752031 | 2559 | rst_level = HNAE3_CORE_RESET; |
62aff578 HT |
2560 | clear_bit(HNAE3_CORE_RESET, addr); |
2561 | clear_bit(HNAE3_FUNC_RESET, addr); | |
2562 | } else if (test_bit(HNAE3_FUNC_RESET, addr)) { | |
d5752031 | 2563 | rst_level = HNAE3_FUNC_RESET; |
62aff578 | 2564 | clear_bit(HNAE3_FUNC_RESET, addr); |
26977990 HT |
2565 | } else if (test_bit(HNAE3_FLR_RESET, addr)) { |
2566 | rst_level = HNAE3_FLR_RESET; | |
2567 | clear_bit(HNAE3_FLR_RESET, addr); | |
62aff578 | 2568 | } |
d5752031 SM |
2569 | |
2570 | return rst_level; | |
2571 | } | |
2572 | ||
e9a50d09 YL |
2573 | static void hclge_clear_reset_cause(struct hclge_dev *hdev) |
2574 | { | |
2575 | u32 clearval = 0; | |
2576 | ||
2577 | switch (hdev->reset_type) { | |
2578 | case HNAE3_IMP_RESET: | |
2579 | clearval = BIT(HCLGE_VECTOR0_IMPRESET_INT_B); | |
2580 | break; | |
2581 | case HNAE3_GLOBAL_RESET: | |
2582 | clearval = BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B); | |
2583 | break; | |
2584 | case HNAE3_CORE_RESET: | |
2585 | clearval = BIT(HCLGE_VECTOR0_CORERESET_INT_B); | |
2586 | break; | |
2587 | default: | |
e9a50d09 YL |
2588 | break; |
2589 | } | |
2590 | ||
2591 | if (!clearval) | |
2592 | return; | |
2593 | ||
2594 | hclge_write_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG, clearval); | |
2595 | hclge_enable_vector(&hdev->misc_vector, true); | |
2596 | } | |
2597 | ||
7885e906 HT |
2598 | static int hclge_reset_prepare_down(struct hclge_dev *hdev) |
2599 | { | |
2600 | int ret = 0; | |
2601 | ||
2602 | switch (hdev->reset_type) { | |
2603 | case HNAE3_FUNC_RESET: | |
26977990 HT |
2604 | /* fall through */ |
2605 | case HNAE3_FLR_RESET: | |
7885e906 HT |
2606 | ret = hclge_set_all_vf_rst(hdev, true); |
2607 | break; | |
2608 | default: | |
2609 | break; | |
2610 | } | |
2611 | ||
2612 | return ret; | |
2613 | } | |
2614 | ||
48ac80db HT |
2615 | static int hclge_reset_prepare_wait(struct hclge_dev *hdev) |
2616 | { | |
de2eae69 | 2617 | u32 reg_val; |
48ac80db HT |
2618 | int ret = 0; |
2619 | ||
2620 | switch (hdev->reset_type) { | |
2621 | case HNAE3_FUNC_RESET: | |
7885e906 HT |
2622 | /* There is no mechanism for PF to know if VF has stopped IO |
2623 | * for now, just wait 100 ms for VF to stop IO | |
2624 | */ | |
2625 | msleep(100); | |
48ac80db HT |
2626 | ret = hclge_func_reset_cmd(hdev, 0); |
2627 | if (ret) { | |
2628 | dev_err(&hdev->pdev->dev, | |
7707c27b | 2629 | "asserting function reset fail %d!\n", ret); |
48ac80db HT |
2630 | return ret; |
2631 | } | |
2632 | ||
2633 | /* After performaning pf reset, it is not necessary to do the | |
2634 | * mailbox handling or send any command to firmware, because | |
2635 | * any mailbox handling or command to firmware is only valid | |
2636 | * after hclge_cmd_init is called. | |
2637 | */ | |
2638 | set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state); | |
2639 | break; | |
26977990 HT |
2640 | case HNAE3_FLR_RESET: |
2641 | /* There is no mechanism for PF to know if VF has stopped IO | |
2642 | * for now, just wait 100 ms for VF to stop IO | |
2643 | */ | |
2644 | msleep(100); | |
2645 | set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state); | |
2646 | set_bit(HNAE3_FLR_DOWN, &hdev->flr_state); | |
2647 | break; | |
de2eae69 HT |
2648 | case HNAE3_IMP_RESET: |
2649 | reg_val = hclge_read_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG); | |
2650 | hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG, | |
2651 | BIT(HCLGE_VECTOR0_IMP_RESET_INT_B) | reg_val); | |
2652 | break; | |
48ac80db HT |
2653 | default: |
2654 | break; | |
2655 | } | |
2656 | ||
2657 | dev_info(&hdev->pdev->dev, "prepare wait ok\n"); | |
2658 | ||
2659 | return ret; | |
2660 | } | |
2661 | ||
1afdb53a HT |
2662 | static bool hclge_reset_err_handle(struct hclge_dev *hdev, bool is_timeout) |
2663 | { | |
2664 | #define MAX_RESET_FAIL_CNT 5 | |
2665 | #define RESET_UPGRADE_DELAY_SEC 10 | |
2666 | ||
2667 | if (hdev->reset_pending) { | |
2668 | dev_info(&hdev->pdev->dev, "Reset pending %lu\n", | |
2669 | hdev->reset_pending); | |
2670 | return true; | |
2671 | } else if ((hdev->reset_type != HNAE3_IMP_RESET) && | |
2672 | (hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG) & | |
2673 | BIT(HCLGE_IMP_RESET_BIT))) { | |
2674 | dev_info(&hdev->pdev->dev, | |
2675 | "reset failed because IMP Reset is pending\n"); | |
2676 | hclge_clear_reset_cause(hdev); | |
2677 | return false; | |
2678 | } else if (hdev->reset_fail_cnt < MAX_RESET_FAIL_CNT) { | |
2679 | hdev->reset_fail_cnt++; | |
2680 | if (is_timeout) { | |
2681 | set_bit(hdev->reset_type, &hdev->reset_pending); | |
2682 | dev_info(&hdev->pdev->dev, | |
2683 | "re-schedule to wait for hw reset done\n"); | |
2684 | return true; | |
2685 | } | |
2686 | ||
2687 | dev_info(&hdev->pdev->dev, "Upgrade reset level\n"); | |
2688 | hclge_clear_reset_cause(hdev); | |
2689 | mod_timer(&hdev->reset_timer, | |
2690 | jiffies + RESET_UPGRADE_DELAY_SEC * HZ); | |
2691 | ||
2692 | return false; | |
2693 | } | |
2694 | ||
2695 | hclge_clear_reset_cause(hdev); | |
2696 | dev_err(&hdev->pdev->dev, "Reset fail!\n"); | |
2697 | return false; | |
2698 | } | |
2699 | ||
7885e906 HT |
2700 | static int hclge_reset_prepare_up(struct hclge_dev *hdev) |
2701 | { | |
2702 | int ret = 0; | |
2703 | ||
2704 | switch (hdev->reset_type) { | |
2705 | case HNAE3_FUNC_RESET: | |
26977990 HT |
2706 | /* fall through */ |
2707 | case HNAE3_FLR_RESET: | |
7885e906 HT |
2708 | ret = hclge_set_all_vf_rst(hdev, false); |
2709 | break; | |
2710 | default: | |
2711 | break; | |
2712 | } | |
2713 | ||
2714 | return ret; | |
2715 | } | |
2716 | ||
d5752031 SM |
2717 | static void hclge_reset(struct hclge_dev *hdev) |
2718 | { | |
7ce98982 | 2719 | struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev); |
1afdb53a HT |
2720 | bool is_timeout = false; |
2721 | int ret; | |
1a45360a | 2722 | |
7ce98982 JS |
2723 | /* Initialize ae_dev reset status as well, in case enet layer wants to |
2724 | * know if device is undergoing reset | |
2725 | */ | |
2726 | ae_dev->reset_type = hdev->reset_type; | |
225c02eb | 2727 | hdev->reset_count++; |
1a2f7bf2 | 2728 | hdev->last_reset_time = jiffies; |
d5752031 | 2729 | /* perform reset of the stack & ae device for a client */ |
1afdb53a HT |
2730 | ret = hclge_notify_roce_client(hdev, HNAE3_DOWN_CLIENT); |
2731 | if (ret) | |
2732 | goto err_reset; | |
2733 | ||
7885e906 HT |
2734 | ret = hclge_reset_prepare_down(hdev); |
2735 | if (ret) | |
2736 | goto err_reset; | |
2737 | ||
47622dc9 | 2738 | rtnl_lock(); |
1afdb53a HT |
2739 | ret = hclge_notify_client(hdev, HNAE3_DOWN_CLIENT); |
2740 | if (ret) | |
2741 | goto err_reset_lock; | |
d5752031 | 2742 | |
1afdb53a | 2743 | rtnl_unlock(); |
48ac80db | 2744 | |
1afdb53a HT |
2745 | ret = hclge_reset_prepare_wait(hdev); |
2746 | if (ret) | |
2747 | goto err_reset; | |
e9a50d09 | 2748 | |
1afdb53a HT |
2749 | if (hclge_reset_wait(hdev)) { |
2750 | is_timeout = true; | |
2751 | goto err_reset; | |
d5752031 SM |
2752 | } |
2753 | ||
1afdb53a HT |
2754 | ret = hclge_notify_roce_client(hdev, HNAE3_UNINIT_CLIENT); |
2755 | if (ret) | |
2756 | goto err_reset; | |
2757 | ||
2758 | rtnl_lock(); | |
2759 | ret = hclge_notify_client(hdev, HNAE3_UNINIT_CLIENT); | |
2760 | if (ret) | |
2761 | goto err_reset_lock; | |
2762 | ||
2763 | ret = hclge_reset_ae_dev(hdev->ae_dev); | |
2764 | if (ret) | |
2765 | goto err_reset_lock; | |
2766 | ||
2767 | ret = hclge_notify_client(hdev, HNAE3_INIT_CLIENT); | |
2768 | if (ret) | |
2769 | goto err_reset_lock; | |
2770 | ||
2771 | hclge_clear_reset_cause(hdev); | |
2772 | ||
7885e906 HT |
2773 | ret = hclge_reset_prepare_up(hdev); |
2774 | if (ret) | |
2775 | goto err_reset_lock; | |
2776 | ||
1afdb53a HT |
2777 | ret = hclge_notify_client(hdev, HNAE3_UP_CLIENT); |
2778 | if (ret) | |
2779 | goto err_reset_lock; | |
2780 | ||
47622dc9 | 2781 | rtnl_unlock(); |
3db6b633 | 2782 | |
1afdb53a HT |
2783 | ret = hclge_notify_roce_client(hdev, HNAE3_INIT_CLIENT); |
2784 | if (ret) | |
2785 | goto err_reset; | |
2786 | ||
2787 | ret = hclge_notify_roce_client(hdev, HNAE3_UP_CLIENT); | |
2788 | if (ret) | |
2789 | goto err_reset; | |
2790 | ||
2791 | return; | |
2792 | ||
2793 | err_reset_lock: | |
2794 | rtnl_unlock(); | |
2795 | err_reset: | |
2796 | if (hclge_reset_err_handle(hdev, is_timeout)) | |
2797 | hclge_reset_task_schedule(hdev); | |
d5752031 SM |
2798 | } |
2799 | ||
538d8ba0 SJ |
2800 | static void hclge_reset_event(struct pci_dev *pdev, struct hnae3_handle *handle) |
2801 | { | |
2802 | struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev); | |
2803 | struct hclge_dev *hdev = ae_dev->priv; | |
2804 | ||
2805 | /* We might end up getting called broadly because of 2 below cases: | |
2806 | * 1. Recoverable error was conveyed through APEI and only way to bring | |
2807 | * normalcy is to reset. | |
2808 | * 2. A new reset request from the stack due to timeout | |
2809 | * | |
2810 | * For the first case,error event might not have ae handle available. | |
2811 | * check if this is a new reset request and we are not here just because | |
4aef908d SM |
2812 | * last reset attempt did not succeed and watchdog hit us again. We will |
2813 | * know this if last reset request did not occur very recently (watchdog | |
2814 | * timer = 5*HZ, let us check after sufficiently large time, say 4*5*Hz) | |
2815 | * In case of new request we reset the "reset level" to PF reset. | |
1a45360a HT |
2816 | * And if it is a repeat reset request of the most recent one then we |
2817 | * want to make sure we throttle the reset request. Therefore, we will | |
2818 | * not allow it again before 3*HZ times. | |
4aef908d | 2819 | */ |
538d8ba0 SJ |
2820 | if (!handle) |
2821 | handle = &hdev->vport[0].nic; | |
2822 | ||
1a2f7bf2 | 2823 | if (time_before(jiffies, (hdev->last_reset_time + 3 * HZ))) |
1a45360a | 2824 | return; |
2c883d73 | 2825 | else if (hdev->default_reset_request) |
1a2f7bf2 | 2826 | hdev->reset_level = |
2c883d73 HT |
2827 | hclge_get_reset_level(hdev, |
2828 | &hdev->default_reset_request); | |
1a2f7bf2 HT |
2829 | else if (time_after(jiffies, (hdev->last_reset_time + 4 * 5 * HZ))) |
2830 | hdev->reset_level = HNAE3_FUNC_RESET; | |
4ed340ab | 2831 | |
4aef908d | 2832 | dev_info(&hdev->pdev->dev, "received reset event , reset type is %d", |
1a2f7bf2 | 2833 | hdev->reset_level); |
4aef908d SM |
2834 | |
2835 | /* request reset & schedule reset task */ | |
1a2f7bf2 | 2836 | set_bit(hdev->reset_level, &hdev->reset_request); |
4aef908d SM |
2837 | hclge_reset_task_schedule(hdev); |
2838 | ||
1a2f7bf2 HT |
2839 | if (hdev->reset_level < HNAE3_GLOBAL_RESET) |
2840 | hdev->reset_level++; | |
4ed340ab L |
2841 | } |
2842 | ||
2c883d73 HT |
2843 | static void hclge_set_def_reset_request(struct hnae3_ae_dev *ae_dev, |
2844 | enum hnae3_reset_type rst_type) | |
2845 | { | |
2846 | struct hclge_dev *hdev = ae_dev->priv; | |
2847 | ||
2848 | set_bit(rst_type, &hdev->default_reset_request); | |
2849 | } | |
2850 | ||
1afdb53a HT |
2851 | static void hclge_reset_timer(struct timer_list *t) |
2852 | { | |
2853 | struct hclge_dev *hdev = from_timer(hdev, t, reset_timer); | |
2854 | ||
2855 | dev_info(&hdev->pdev->dev, | |
2856 | "triggering global reset in reset timer\n"); | |
2857 | set_bit(HNAE3_GLOBAL_RESET, &hdev->default_reset_request); | |
2858 | hclge_reset_event(hdev->pdev, NULL); | |
2859 | } | |
2860 | ||
4ed340ab L |
2861 | static void hclge_reset_subtask(struct hclge_dev *hdev) |
2862 | { | |
d5752031 SM |
2863 | /* check if there is any ongoing reset in the hardware. This status can |
2864 | * be checked from reset_pending. If there is then, we need to wait for | |
2865 | * hardware to complete reset. | |
2866 | * a. If we are able to figure out in reasonable time that hardware | |
2867 | * has fully resetted then, we can proceed with driver, client | |
2868 | * reset. | |
2869 | * b. else, we can come back later to check this status so re-sched | |
2870 | * now. | |
2871 | */ | |
1a2f7bf2 | 2872 | hdev->last_reset_time = jiffies; |
d5752031 SM |
2873 | hdev->reset_type = hclge_get_reset_level(hdev, &hdev->reset_pending); |
2874 | if (hdev->reset_type != HNAE3_NONE_RESET) | |
2875 | hclge_reset(hdev); | |
4ed340ab | 2876 | |
d5752031 SM |
2877 | /* check if we got any *new* reset requests to be honored */ |
2878 | hdev->reset_type = hclge_get_reset_level(hdev, &hdev->reset_request); | |
2879 | if (hdev->reset_type != HNAE3_NONE_RESET) | |
2880 | hclge_do_reset(hdev); | |
4ed340ab | 2881 | |
4ed340ab L |
2882 | hdev->reset_type = HNAE3_NONE_RESET; |
2883 | } | |
2884 | ||
ed4a1bb8 | 2885 | static void hclge_reset_service_task(struct work_struct *work) |
466b0c00 | 2886 | { |
ed4a1bb8 SM |
2887 | struct hclge_dev *hdev = |
2888 | container_of(work, struct hclge_dev, rst_service_task); | |
2889 | ||
2890 | if (test_and_set_bit(HCLGE_STATE_RST_HANDLING, &hdev->state)) | |
2891 | return; | |
2892 | ||
2893 | clear_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state); | |
2894 | ||
4ed340ab | 2895 | hclge_reset_subtask(hdev); |
ed4a1bb8 SM |
2896 | |
2897 | clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state); | |
466b0c00 L |
2898 | } |
2899 | ||
22fd3468 SM |
2900 | static void hclge_mailbox_service_task(struct work_struct *work) |
2901 | { | |
2902 | struct hclge_dev *hdev = | |
2903 | container_of(work, struct hclge_dev, mbx_service_task); | |
2904 | ||
2905 | if (test_and_set_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state)) | |
2906 | return; | |
2907 | ||
2908 | clear_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state); | |
2909 | ||
2910 | hclge_mbx_handler(hdev); | |
2911 | ||
2912 | clear_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state); | |
2913 | } | |
2914 | ||
337460de YL |
2915 | static void hclge_update_vport_alive(struct hclge_dev *hdev) |
2916 | { | |
2917 | int i; | |
2918 | ||
2919 | /* start from vport 1 for PF is always alive */ | |
2920 | for (i = 1; i < hdev->num_alloc_vport; i++) { | |
2921 | struct hclge_vport *vport = &hdev->vport[i]; | |
2922 | ||
2923 | if (time_after(jiffies, vport->last_active_jiffies + 8 * HZ)) | |
2924 | clear_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state); | |
b2c04029 YL |
2925 | |
2926 | /* If vf is not alive, set to default value */ | |
2927 | if (!test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state)) | |
2928 | vport->mps = HCLGE_MAC_DEFAULT_FRAME; | |
337460de YL |
2929 | } |
2930 | } | |
2931 | ||
46a3df9f S |
2932 | static void hclge_service_task(struct work_struct *work) |
2933 | { | |
2934 | struct hclge_dev *hdev = | |
2935 | container_of(work, struct hclge_dev, service_task); | |
2936 | ||
7a5d2a39 JS |
2937 | if (hdev->hw_stats.stats_timer >= HCLGE_STATS_TIMER_INTERVAL) { |
2938 | hclge_update_stats_for_all(hdev); | |
2939 | hdev->hw_stats.stats_timer = 0; | |
2940 | } | |
2941 | ||
46a3df9f S |
2942 | hclge_update_speed_duplex(hdev); |
2943 | hclge_update_link_status(hdev); | |
337460de | 2944 | hclge_update_vport_alive(hdev); |
46a3df9f S |
2945 | hclge_service_complete(hdev); |
2946 | } | |
2947 | ||
46a3df9f S |
2948 | struct hclge_vport *hclge_get_vport(struct hnae3_handle *handle) |
2949 | { | |
2950 | /* VF handle has no client */ | |
2951 | if (!handle->client) | |
2952 | return container_of(handle, struct hclge_vport, nic); | |
2953 | else if (handle->client->type == HNAE3_CLIENT_ROCE) | |
2954 | return container_of(handle, struct hclge_vport, roce); | |
2955 | else | |
2956 | return container_of(handle, struct hclge_vport, nic); | |
2957 | } | |
2958 | ||
2959 | static int hclge_get_vector(struct hnae3_handle *handle, u16 vector_num, | |
2960 | struct hnae3_vector_info *vector_info) | |
2961 | { | |
2962 | struct hclge_vport *vport = hclge_get_vport(handle); | |
2963 | struct hnae3_vector_info *vector = vector_info; | |
2964 | struct hclge_dev *hdev = vport->back; | |
2965 | int alloc = 0; | |
2966 | int i, j; | |
2967 | ||
2968 | vector_num = min(hdev->num_msi_left, vector_num); | |
2969 | ||
2970 | for (j = 0; j < vector_num; j++) { | |
2971 | for (i = 1; i < hdev->num_msi; i++) { | |
2972 | if (hdev->vector_status[i] == HCLGE_INVALID_VPORT) { | |
2973 | vector->vector = pci_irq_vector(hdev->pdev, i); | |
2974 | vector->io_addr = hdev->hw.io_base + | |
2975 | HCLGE_VECTOR_REG_BASE + | |
2976 | (i - 1) * HCLGE_VECTOR_REG_OFFSET + | |
2977 | vport->vport_id * | |
2978 | HCLGE_VECTOR_VF_OFFSET; | |
2979 | hdev->vector_status[i] = vport->vport_id; | |
887c3820 | 2980 | hdev->vector_irq[i] = vector->vector; |
46a3df9f S |
2981 | |
2982 | vector++; | |
2983 | alloc++; | |
2984 | ||
2985 | break; | |
2986 | } | |
2987 | } | |
2988 | } | |
2989 | hdev->num_msi_left -= alloc; | |
2990 | hdev->num_msi_used += alloc; | |
2991 | ||
2992 | return alloc; | |
2993 | } | |
2994 | ||
2995 | static int hclge_get_vector_index(struct hclge_dev *hdev, int vector) | |
2996 | { | |
2997 | int i; | |
2998 | ||
887c3820 SM |
2999 | for (i = 0; i < hdev->num_msi; i++) |
3000 | if (vector == hdev->vector_irq[i]) | |
3001 | return i; | |
3002 | ||
46a3df9f S |
3003 | return -EINVAL; |
3004 | } | |
3005 | ||
7412200c YL |
3006 | static int hclge_put_vector(struct hnae3_handle *handle, int vector) |
3007 | { | |
3008 | struct hclge_vport *vport = hclge_get_vport(handle); | |
3009 | struct hclge_dev *hdev = vport->back; | |
3010 | int vector_id; | |
3011 | ||
3012 | vector_id = hclge_get_vector_index(hdev, vector); | |
3013 | if (vector_id < 0) { | |
3014 | dev_err(&hdev->pdev->dev, | |
3015 | "Get vector index fail. vector_id =%d\n", vector_id); | |
3016 | return vector_id; | |
3017 | } | |
3018 | ||
3019 | hclge_free_vector(hdev, vector_id); | |
3020 | ||
3021 | return 0; | |
3022 | } | |
3023 | ||
46a3df9f S |
3024 | static u32 hclge_get_rss_key_size(struct hnae3_handle *handle) |
3025 | { | |
3026 | return HCLGE_RSS_KEY_SIZE; | |
3027 | } | |
3028 | ||
3029 | static u32 hclge_get_rss_indir_size(struct hnae3_handle *handle) | |
3030 | { | |
3031 | return HCLGE_RSS_IND_TBL_SIZE; | |
3032 | } | |
3033 | ||
46a3df9f S |
3034 | static int hclge_set_rss_algo_key(struct hclge_dev *hdev, |
3035 | const u8 hfunc, const u8 *key) | |
3036 | { | |
d44f9b63 | 3037 | struct hclge_rss_config_cmd *req; |
46a3df9f S |
3038 | struct hclge_desc desc; |
3039 | int key_offset; | |
3040 | int key_size; | |
3041 | int ret; | |
3042 | ||
d44f9b63 | 3043 | req = (struct hclge_rss_config_cmd *)desc.data; |
46a3df9f S |
3044 | |
3045 | for (key_offset = 0; key_offset < 3; key_offset++) { | |
3046 | hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_GENERIC_CONFIG, | |
3047 | false); | |
3048 | ||
3049 | req->hash_config |= (hfunc & HCLGE_RSS_HASH_ALGO_MASK); | |
3050 | req->hash_config |= (key_offset << HCLGE_RSS_HASH_KEY_OFFSET_B); | |
3051 | ||
3052 | if (key_offset == 2) | |
3053 | key_size = | |
3054 | HCLGE_RSS_KEY_SIZE - HCLGE_RSS_HASH_KEY_NUM * 2; | |
3055 | else | |
3056 | key_size = HCLGE_RSS_HASH_KEY_NUM; | |
3057 | ||
3058 | memcpy(req->hash_key, | |
3059 | key + key_offset * HCLGE_RSS_HASH_KEY_NUM, key_size); | |
3060 | ||
3061 | ret = hclge_cmd_send(&hdev->hw, &desc, 1); | |
3062 | if (ret) { | |
3063 | dev_err(&hdev->pdev->dev, | |
3064 | "Configure RSS config fail, status = %d\n", | |
3065 | ret); | |
3066 | return ret; | |
3067 | } | |
3068 | } | |
3069 | return 0; | |
3070 | } | |
3071 | ||
dcd4ef5e | 3072 | static int hclge_set_rss_indir_table(struct hclge_dev *hdev, const u8 *indir) |
46a3df9f | 3073 | { |
d44f9b63 | 3074 | struct hclge_rss_indirection_table_cmd *req; |
46a3df9f S |
3075 | struct hclge_desc desc; |
3076 | int i, j; | |
3077 | int ret; | |
3078 | ||
d44f9b63 | 3079 | req = (struct hclge_rss_indirection_table_cmd *)desc.data; |
46a3df9f S |
3080 | |
3081 | for (i = 0; i < HCLGE_RSS_CFG_TBL_NUM; i++) { | |
3082 | hclge_cmd_setup_basic_desc | |
3083 | (&desc, HCLGE_OPC_RSS_INDIR_TABLE, false); | |
3084 | ||
a90bb9a5 YL |
3085 | req->start_table_index = |
3086 | cpu_to_le16(i * HCLGE_RSS_CFG_TBL_SIZE); | |
3087 | req->rss_set_bitmap = cpu_to_le16(HCLGE_RSS_SET_BITMAP_MSK); | |
46a3df9f S |
3088 | |
3089 | for (j = 0; j < HCLGE_RSS_CFG_TBL_SIZE; j++) | |
3090 | req->rss_result[j] = | |
3091 | indir[i * HCLGE_RSS_CFG_TBL_SIZE + j]; | |
3092 | ||
3093 | ret = hclge_cmd_send(&hdev->hw, &desc, 1); | |
3094 | if (ret) { | |
3095 | dev_err(&hdev->pdev->dev, | |
3096 | "Configure rss indir table fail,status = %d\n", | |
3097 | ret); | |
3098 | return ret; | |
3099 | } | |
3100 | } | |
3101 | return 0; | |
3102 | } | |
3103 | ||
3104 | static int hclge_set_rss_tc_mode(struct hclge_dev *hdev, u16 *tc_valid, | |
3105 | u16 *tc_size, u16 *tc_offset) | |
3106 | { | |
d44f9b63 | 3107 | struct hclge_rss_tc_mode_cmd *req; |
46a3df9f S |
3108 | struct hclge_desc desc; |
3109 | int ret; | |
3110 | int i; | |
3111 | ||
3112 | hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_TC_MODE, false); | |
d44f9b63 | 3113 | req = (struct hclge_rss_tc_mode_cmd *)desc.data; |
46a3df9f S |
3114 | |
3115 | for (i = 0; i < HCLGE_MAX_TC_NUM; i++) { | |
a90bb9a5 YL |
3116 | u16 mode = 0; |
3117 | ||
ccc23ef3 PL |
3118 | hnae3_set_bit(mode, HCLGE_RSS_TC_VALID_B, (tc_valid[i] & 0x1)); |
3119 | hnae3_set_field(mode, HCLGE_RSS_TC_SIZE_M, | |
3120 | HCLGE_RSS_TC_SIZE_S, tc_size[i]); | |
3121 | hnae3_set_field(mode, HCLGE_RSS_TC_OFFSET_M, | |
3122 | HCLGE_RSS_TC_OFFSET_S, tc_offset[i]); | |
a90bb9a5 YL |
3123 | |
3124 | req->rss_tc_mode[i] = cpu_to_le16(mode); | |
46a3df9f S |
3125 | } |
3126 | ||
3127 | ret = hclge_cmd_send(&hdev->hw, &desc, 1); | |
90415e85 | 3128 | if (ret) |
46a3df9f S |
3129 | dev_err(&hdev->pdev->dev, |
3130 | "Configure rss tc mode fail, status = %d\n", ret); | |
46a3df9f | 3131 | |
90415e85 | 3132 | return ret; |
46a3df9f S |
3133 | } |
3134 | ||
8e4c877d PL |
3135 | static void hclge_get_rss_type(struct hclge_vport *vport) |
3136 | { | |
3137 | if (vport->rss_tuple_sets.ipv4_tcp_en || | |
3138 | vport->rss_tuple_sets.ipv4_udp_en || | |
3139 | vport->rss_tuple_sets.ipv4_sctp_en || | |
3140 | vport->rss_tuple_sets.ipv6_tcp_en || | |
3141 | vport->rss_tuple_sets.ipv6_udp_en || | |
3142 | vport->rss_tuple_sets.ipv6_sctp_en) | |
3143 | vport->nic.kinfo.rss_type = PKT_HASH_TYPE_L4; | |
3144 | else if (vport->rss_tuple_sets.ipv4_fragment_en || | |
3145 | vport->rss_tuple_sets.ipv6_fragment_en) | |
3146 | vport->nic.kinfo.rss_type = PKT_HASH_TYPE_L3; | |
3147 | else | |
3148 | vport->nic.kinfo.rss_type = PKT_HASH_TYPE_NONE; | |
3149 | } | |
3150 | ||
46a3df9f S |
3151 | static int hclge_set_rss_input_tuple(struct hclge_dev *hdev) |
3152 | { | |
d44f9b63 | 3153 | struct hclge_rss_input_tuple_cmd *req; |
46a3df9f S |
3154 | struct hclge_desc desc; |
3155 | int ret; | |
3156 | ||
3157 | hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_INPUT_TUPLE, false); | |
3158 | ||
d44f9b63 | 3159 | req = (struct hclge_rss_input_tuple_cmd *)desc.data; |
637053ef YL |
3160 | |
3161 | /* Get the tuple cfg from pf */ | |
3162 | req->ipv4_tcp_en = hdev->vport[0].rss_tuple_sets.ipv4_tcp_en; | |
3163 | req->ipv4_udp_en = hdev->vport[0].rss_tuple_sets.ipv4_udp_en; | |
3164 | req->ipv4_sctp_en = hdev->vport[0].rss_tuple_sets.ipv4_sctp_en; | |
3165 | req->ipv4_fragment_en = hdev->vport[0].rss_tuple_sets.ipv4_fragment_en; | |
3166 | req->ipv6_tcp_en = hdev->vport[0].rss_tuple_sets.ipv6_tcp_en; | |
3167 | req->ipv6_udp_en = hdev->vport[0].rss_tuple_sets.ipv6_udp_en; | |
3168 | req->ipv6_sctp_en = hdev->vport[0].rss_tuple_sets.ipv6_sctp_en; | |
3169 | req->ipv6_fragment_en = hdev->vport[0].rss_tuple_sets.ipv6_fragment_en; | |
8e4c877d | 3170 | hclge_get_rss_type(&hdev->vport[0]); |
46a3df9f | 3171 | ret = hclge_cmd_send(&hdev->hw, &desc, 1); |
90415e85 | 3172 | if (ret) |
46a3df9f S |
3173 | dev_err(&hdev->pdev->dev, |
3174 | "Configure rss input fail, status = %d\n", ret); | |
90415e85 | 3175 | return ret; |
46a3df9f S |
3176 | } |
3177 | ||
3178 | static int hclge_get_rss(struct hnae3_handle *handle, u32 *indir, | |
3179 | u8 *key, u8 *hfunc) | |
3180 | { | |
3181 | struct hclge_vport *vport = hclge_get_vport(handle); | |
46a3df9f S |
3182 | int i; |
3183 | ||
3184 | /* Get hash algorithm */ | |
6868d695 JS |
3185 | if (hfunc) { |
3186 | switch (vport->rss_algo) { | |
3187 | case HCLGE_RSS_HASH_ALGO_TOEPLITZ: | |
3188 | *hfunc = ETH_RSS_HASH_TOP; | |
3189 | break; | |
3190 | case HCLGE_RSS_HASH_ALGO_SIMPLE: | |
3191 | *hfunc = ETH_RSS_HASH_XOR; | |
3192 | break; | |
3193 | default: | |
3194 | *hfunc = ETH_RSS_HASH_UNKNOWN; | |
3195 | break; | |
3196 | } | |
3197 | } | |
46a3df9f S |
3198 | |
3199 | /* Get the RSS Key required by the user */ | |
3200 | if (key) | |
3201 | memcpy(key, vport->rss_hash_key, HCLGE_RSS_KEY_SIZE); | |
3202 | ||
3203 | /* Get indirect table */ | |
3204 | if (indir) | |
3205 | for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++) | |
3206 | indir[i] = vport->rss_indirection_tbl[i]; | |
3207 | ||
3208 | return 0; | |
3209 | } | |
3210 | ||
3211 | static int hclge_set_rss(struct hnae3_handle *handle, const u32 *indir, | |
3212 | const u8 *key, const u8 hfunc) | |
3213 | { | |
3214 | struct hclge_vport *vport = hclge_get_vport(handle); | |
3215 | struct hclge_dev *hdev = vport->back; | |
3216 | u8 hash_algo; | |
3217 | int ret, i; | |
3218 | ||
3219 | /* Set the RSS Hash Key if specififed by the user */ | |
3220 | if (key) { | |
6868d695 JS |
3221 | switch (hfunc) { |
3222 | case ETH_RSS_HASH_TOP: | |
46a3df9f | 3223 | hash_algo = HCLGE_RSS_HASH_ALGO_TOEPLITZ; |
6868d695 JS |
3224 | break; |
3225 | case ETH_RSS_HASH_XOR: | |
3226 | hash_algo = HCLGE_RSS_HASH_ALGO_SIMPLE; | |
3227 | break; | |
3228 | case ETH_RSS_HASH_NO_CHANGE: | |
3229 | hash_algo = vport->rss_algo; | |
3230 | break; | |
3231 | default: | |
46a3df9f | 3232 | return -EINVAL; |
6868d695 JS |
3233 | } |
3234 | ||
46a3df9f S |
3235 | ret = hclge_set_rss_algo_key(hdev, hash_algo, key); |
3236 | if (ret) | |
3237 | return ret; | |
dcd4ef5e YL |
3238 | |
3239 | /* Update the shadow RSS key with user specified qids */ | |
3240 | memcpy(vport->rss_hash_key, key, HCLGE_RSS_KEY_SIZE); | |
3241 | vport->rss_algo = hash_algo; | |
46a3df9f S |
3242 | } |
3243 | ||
3244 | /* Update the shadow RSS table with user specified qids */ | |
3245 | for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++) | |
3246 | vport->rss_indirection_tbl[i] = indir[i]; | |
3247 | ||
3248 | /* Update the hardware */ | |
dcd4ef5e | 3249 | return hclge_set_rss_indir_table(hdev, vport->rss_indirection_tbl); |
46a3df9f S |
3250 | } |
3251 | ||
f7db940a L |
3252 | static u8 hclge_get_rss_hash_bits(struct ethtool_rxnfc *nfc) |
3253 | { | |
3254 | u8 hash_sets = nfc->data & RXH_L4_B_0_1 ? HCLGE_S_PORT_BIT : 0; | |
3255 | ||
3256 | if (nfc->data & RXH_L4_B_2_3) | |
3257 | hash_sets |= HCLGE_D_PORT_BIT; | |
3258 | else | |
3259 | hash_sets &= ~HCLGE_D_PORT_BIT; | |
3260 | ||
3261 | if (nfc->data & RXH_IP_SRC) | |
3262 | hash_sets |= HCLGE_S_IP_BIT; | |
3263 | else | |
3264 | hash_sets &= ~HCLGE_S_IP_BIT; | |
3265 | ||
3266 | if (nfc->data & RXH_IP_DST) | |
3267 | hash_sets |= HCLGE_D_IP_BIT; | |
3268 | else | |
3269 | hash_sets &= ~HCLGE_D_IP_BIT; | |
3270 | ||
3271 | if (nfc->flow_type == SCTP_V4_FLOW || nfc->flow_type == SCTP_V6_FLOW) | |
3272 | hash_sets |= HCLGE_V_TAG_BIT; | |
3273 | ||
3274 | return hash_sets; | |
3275 | } | |
3276 | ||
3277 | static int hclge_set_rss_tuple(struct hnae3_handle *handle, | |
3278 | struct ethtool_rxnfc *nfc) | |
3279 | { | |
3280 | struct hclge_vport *vport = hclge_get_vport(handle); | |
3281 | struct hclge_dev *hdev = vport->back; | |
3282 | struct hclge_rss_input_tuple_cmd *req; | |
3283 | struct hclge_desc desc; | |
3284 | u8 tuple_sets; | |
3285 | int ret; | |
3286 | ||
3287 | if (nfc->data & ~(RXH_IP_SRC | RXH_IP_DST | | |
3288 | RXH_L4_B_0_1 | RXH_L4_B_2_3)) | |
3289 | return -EINVAL; | |
3290 | ||
3291 | req = (struct hclge_rss_input_tuple_cmd *)desc.data; | |
637053ef | 3292 | hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_INPUT_TUPLE, false); |
f7db940a | 3293 | |
637053ef YL |
3294 | req->ipv4_tcp_en = vport->rss_tuple_sets.ipv4_tcp_en; |
3295 | req->ipv4_udp_en = vport->rss_tuple_sets.ipv4_udp_en; | |
3296 | req->ipv4_sctp_en = vport->rss_tuple_sets.ipv4_sctp_en; | |
3297 | req->ipv4_fragment_en = vport->rss_tuple_sets.ipv4_fragment_en; | |
3298 | req->ipv6_tcp_en = vport->rss_tuple_sets.ipv6_tcp_en; | |
3299 | req->ipv6_udp_en = vport->rss_tuple_sets.ipv6_udp_en; | |
3300 | req->ipv6_sctp_en = vport->rss_tuple_sets.ipv6_sctp_en; | |
3301 | req->ipv6_fragment_en = vport->rss_tuple_sets.ipv6_fragment_en; | |
f7db940a L |
3302 | |
3303 | tuple_sets = hclge_get_rss_hash_bits(nfc); | |
3304 | switch (nfc->flow_type) { | |
3305 | case TCP_V4_FLOW: | |
3306 | req->ipv4_tcp_en = tuple_sets; | |
3307 | break; | |
3308 | case TCP_V6_FLOW: | |
3309 | req->ipv6_tcp_en = tuple_sets; | |
3310 | break; | |
3311 | case UDP_V4_FLOW: | |
3312 | req->ipv4_udp_en = tuple_sets; | |
3313 | break; | |
3314 | case UDP_V6_FLOW: | |
3315 | req->ipv6_udp_en = tuple_sets; | |
3316 | break; | |
3317 | case SCTP_V4_FLOW: | |
3318 | req->ipv4_sctp_en = tuple_sets; | |
3319 | break; | |
3320 | case SCTP_V6_FLOW: | |
3321 | if ((nfc->data & RXH_L4_B_0_1) || | |
3322 | (nfc->data & RXH_L4_B_2_3)) | |
3323 | return -EINVAL; | |
3324 | ||
3325 | req->ipv6_sctp_en = tuple_sets; | |
3326 | break; | |
3327 | case IPV4_FLOW: | |
3328 | req->ipv4_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER; | |
3329 | break; | |
3330 | case IPV6_FLOW: | |
3331 | req->ipv6_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER; | |
3332 | break; | |
3333 | default: | |
3334 | return -EINVAL; | |
3335 | } | |
3336 | ||
3337 | ret = hclge_cmd_send(&hdev->hw, &desc, 1); | |
637053ef | 3338 | if (ret) { |
f7db940a L |
3339 | dev_err(&hdev->pdev->dev, |
3340 | "Set rss tuple fail, status = %d\n", ret); | |
637053ef YL |
3341 | return ret; |
3342 | } | |
f7db940a | 3343 | |
637053ef YL |
3344 | vport->rss_tuple_sets.ipv4_tcp_en = req->ipv4_tcp_en; |
3345 | vport->rss_tuple_sets.ipv4_udp_en = req->ipv4_udp_en; | |
3346 | vport->rss_tuple_sets.ipv4_sctp_en = req->ipv4_sctp_en; | |
3347 | vport->rss_tuple_sets.ipv4_fragment_en = req->ipv4_fragment_en; | |
3348 | vport->rss_tuple_sets.ipv6_tcp_en = req->ipv6_tcp_en; | |
3349 | vport->rss_tuple_sets.ipv6_udp_en = req->ipv6_udp_en; | |
3350 | vport->rss_tuple_sets.ipv6_sctp_en = req->ipv6_sctp_en; | |
3351 | vport->rss_tuple_sets.ipv6_fragment_en = req->ipv6_fragment_en; | |
8e4c877d | 3352 | hclge_get_rss_type(vport); |
637053ef | 3353 | return 0; |
f7db940a L |
3354 | } |
3355 | ||
07d29954 L |
3356 | static int hclge_get_rss_tuple(struct hnae3_handle *handle, |
3357 | struct ethtool_rxnfc *nfc) | |
3358 | { | |
3359 | struct hclge_vport *vport = hclge_get_vport(handle); | |
07d29954 | 3360 | u8 tuple_sets; |
07d29954 L |
3361 | |
3362 | nfc->data = 0; | |
3363 | ||
07d29954 L |
3364 | switch (nfc->flow_type) { |
3365 | case TCP_V4_FLOW: | |
637053ef | 3366 | tuple_sets = vport->rss_tuple_sets.ipv4_tcp_en; |
07d29954 L |
3367 | break; |
3368 | case UDP_V4_FLOW: | |
637053ef | 3369 | tuple_sets = vport->rss_tuple_sets.ipv4_udp_en; |
07d29954 L |
3370 | break; |
3371 | case TCP_V6_FLOW: | |
637053ef | 3372 | tuple_sets = vport->rss_tuple_sets.ipv6_tcp_en; |
07d29954 L |
3373 | break; |
3374 | case UDP_V6_FLOW: | |
637053ef | 3375 | tuple_sets = vport->rss_tuple_sets.ipv6_udp_en; |
07d29954 L |
3376 | break; |
3377 | case SCTP_V4_FLOW: | |
637053ef | 3378 | tuple_sets = vport->rss_tuple_sets.ipv4_sctp_en; |
07d29954 L |
3379 | break; |
3380 | case SCTP_V6_FLOW: | |
637053ef | 3381 | tuple_sets = vport->rss_tuple_sets.ipv6_sctp_en; |
07d29954 L |
3382 | break; |
3383 | case IPV4_FLOW: | |
3384 | case IPV6_FLOW: | |
3385 | tuple_sets = HCLGE_S_IP_BIT | HCLGE_D_IP_BIT; | |
3386 | break; | |
3387 | default: | |
3388 | return -EINVAL; | |
3389 | } | |
3390 | ||
3391 | if (!tuple_sets) | |
3392 | return 0; | |
3393 | ||
3394 | if (tuple_sets & HCLGE_D_PORT_BIT) | |
3395 | nfc->data |= RXH_L4_B_2_3; | |
3396 | if (tuple_sets & HCLGE_S_PORT_BIT) | |
3397 | nfc->data |= RXH_L4_B_0_1; | |
3398 | if (tuple_sets & HCLGE_D_IP_BIT) | |
3399 | nfc->data |= RXH_IP_DST; | |
3400 | if (tuple_sets & HCLGE_S_IP_BIT) | |
3401 | nfc->data |= RXH_IP_SRC; | |
3402 | ||
3403 | return 0; | |
3404 | } | |
3405 | ||
46a3df9f S |
3406 | static int hclge_get_tc_size(struct hnae3_handle *handle) |
3407 | { | |
3408 | struct hclge_vport *vport = hclge_get_vport(handle); | |
3409 | struct hclge_dev *hdev = vport->back; | |
3410 | ||
3411 | return hdev->rss_size_max; | |
3412 | } | |
3413 | ||
77f255c1 | 3414 | int hclge_rss_init_hw(struct hclge_dev *hdev) |
46a3df9f | 3415 | { |
46a3df9f | 3416 | struct hclge_vport *vport = hdev->vport; |
8015bb74 YL |
3417 | u8 *rss_indir = vport[0].rss_indirection_tbl; |
3418 | u16 rss_size = vport[0].alloc_rss_size; | |
3419 | u8 *key = vport[0].rss_hash_key; | |
3420 | u8 hfunc = vport[0].rss_algo; | |
46a3df9f | 3421 | u16 tc_offset[HCLGE_MAX_TC_NUM]; |
46a3df9f S |
3422 | u16 tc_valid[HCLGE_MAX_TC_NUM]; |
3423 | u16 tc_size[HCLGE_MAX_TC_NUM]; | |
8015bb74 YL |
3424 | u16 roundup_size; |
3425 | int i, ret; | |
68ece54e | 3426 | |
46a3df9f S |
3427 | ret = hclge_set_rss_indir_table(hdev, rss_indir); |
3428 | if (ret) | |
8015bb74 | 3429 | return ret; |
46a3df9f | 3430 | |
46a3df9f S |
3431 | ret = hclge_set_rss_algo_key(hdev, hfunc, key); |
3432 | if (ret) | |
8015bb74 | 3433 | return ret; |
46a3df9f S |
3434 | |
3435 | ret = hclge_set_rss_input_tuple(hdev); | |
3436 | if (ret) | |
8015bb74 | 3437 | return ret; |
46a3df9f | 3438 | |
68ece54e YL |
3439 | /* Each TC have the same queue size, and tc_size set to hardware is |
3440 | * the log2 of roundup power of two of rss_size, the acutal queue | |
3441 | * size is limited by indirection table. | |
3442 | */ | |
3443 | if (rss_size > HCLGE_RSS_TC_SIZE_7 || rss_size == 0) { | |
3444 | dev_err(&hdev->pdev->dev, | |
3445 | "Configure rss tc size failed, invalid TC_SIZE = %d\n", | |
3446 | rss_size); | |
8015bb74 | 3447 | return -EINVAL; |
68ece54e YL |
3448 | } |
3449 | ||
3450 | roundup_size = roundup_pow_of_two(rss_size); | |
3451 | roundup_size = ilog2(roundup_size); | |
3452 | ||
46a3df9f | 3453 | for (i = 0; i < HCLGE_MAX_TC_NUM; i++) { |
68ece54e | 3454 | tc_valid[i] = 0; |
46a3df9f | 3455 | |
68ece54e YL |
3456 | if (!(hdev->hw_tc_map & BIT(i))) |
3457 | continue; | |
3458 | ||
3459 | tc_valid[i] = 1; | |
3460 | tc_size[i] = roundup_size; | |
3461 | tc_offset[i] = rss_size * i; | |
46a3df9f | 3462 | } |
68ece54e | 3463 | |
8015bb74 YL |
3464 | return hclge_set_rss_tc_mode(hdev, tc_valid, tc_size, tc_offset); |
3465 | } | |
46a3df9f | 3466 | |
8015bb74 YL |
3467 | void hclge_rss_indir_init_cfg(struct hclge_dev *hdev) |
3468 | { | |
3469 | struct hclge_vport *vport = hdev->vport; | |
3470 | int i, j; | |
46a3df9f | 3471 | |
8015bb74 YL |
3472 | for (j = 0; j < hdev->num_vmdq_vport + 1; j++) { |
3473 | for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++) | |
3474 | vport[j].rss_indirection_tbl[i] = | |
3475 | i % vport[j].alloc_rss_size; | |
3476 | } | |
3477 | } | |
3478 | ||
3479 | static void hclge_rss_init_cfg(struct hclge_dev *hdev) | |
3480 | { | |
3481 | struct hclge_vport *vport = hdev->vport; | |
3482 | int i; | |
3483 | ||
8015bb74 YL |
3484 | for (i = 0; i < hdev->num_vmdq_vport + 1; i++) { |
3485 | vport[i].rss_tuple_sets.ipv4_tcp_en = | |
3486 | HCLGE_RSS_INPUT_TUPLE_OTHER; | |
3487 | vport[i].rss_tuple_sets.ipv4_udp_en = | |
3488 | HCLGE_RSS_INPUT_TUPLE_OTHER; | |
3489 | vport[i].rss_tuple_sets.ipv4_sctp_en = | |
3490 | HCLGE_RSS_INPUT_TUPLE_SCTP; | |
3491 | vport[i].rss_tuple_sets.ipv4_fragment_en = | |
3492 | HCLGE_RSS_INPUT_TUPLE_OTHER; | |
3493 | vport[i].rss_tuple_sets.ipv6_tcp_en = | |
3494 | HCLGE_RSS_INPUT_TUPLE_OTHER; | |
3495 | vport[i].rss_tuple_sets.ipv6_udp_en = | |
3496 | HCLGE_RSS_INPUT_TUPLE_OTHER; | |
3497 | vport[i].rss_tuple_sets.ipv6_sctp_en = | |
3498 | HCLGE_RSS_INPUT_TUPLE_SCTP; | |
3499 | vport[i].rss_tuple_sets.ipv6_fragment_en = | |
3500 | HCLGE_RSS_INPUT_TUPLE_OTHER; | |
3501 | ||
3502 | vport[i].rss_algo = HCLGE_RSS_HASH_ALGO_TOEPLITZ; | |
823fe868 FL |
3503 | |
3504 | netdev_rss_key_fill(vport[i].rss_hash_key, HCLGE_RSS_KEY_SIZE); | |
8015bb74 YL |
3505 | } |
3506 | ||
3507 | hclge_rss_indir_init_cfg(hdev); | |
46a3df9f S |
3508 | } |
3509 | ||
63d7e66f SM |
3510 | int hclge_bind_ring_with_vector(struct hclge_vport *vport, |
3511 | int vector_id, bool en, | |
3512 | struct hnae3_ring_chain_node *ring_chain) | |
46a3df9f S |
3513 | { |
3514 | struct hclge_dev *hdev = vport->back; | |
46a3df9f S |
3515 | struct hnae3_ring_chain_node *node; |
3516 | struct hclge_desc desc; | |
63d7e66f SM |
3517 | struct hclge_ctrl_vector_chain_cmd *req |
3518 | = (struct hclge_ctrl_vector_chain_cmd *)desc.data; | |
3519 | enum hclge_cmd_status status; | |
3520 | enum hclge_opcode_type op; | |
3521 | u16 tqp_type_and_id; | |
46a3df9f S |
3522 | int i; |
3523 | ||
63d7e66f SM |
3524 | op = en ? HCLGE_OPC_ADD_RING_TO_VECTOR : HCLGE_OPC_DEL_RING_TO_VECTOR; |
3525 | hclge_cmd_setup_basic_desc(&desc, op, false); | |
46a3df9f S |
3526 | req->int_vector_id = vector_id; |
3527 | ||
3528 | i = 0; | |
3529 | for (node = ring_chain; node; node = node->next) { | |
63d7e66f | 3530 | tqp_type_and_id = le16_to_cpu(req->tqp_type_and_id[i]); |
ccc23ef3 PL |
3531 | hnae3_set_field(tqp_type_and_id, HCLGE_INT_TYPE_M, |
3532 | HCLGE_INT_TYPE_S, | |
3533 | hnae3_get_bit(node->flag, HNAE3_RING_TYPE_B)); | |
3534 | hnae3_set_field(tqp_type_and_id, HCLGE_TQP_ID_M, | |
3535 | HCLGE_TQP_ID_S, node->tqp_index); | |
3536 | hnae3_set_field(tqp_type_and_id, HCLGE_INT_GL_IDX_M, | |
3537 | HCLGE_INT_GL_IDX_S, | |
3538 | hnae3_get_field(node->int_gl_idx, | |
3539 | HNAE3_RING_GL_IDX_M, | |
3540 | HNAE3_RING_GL_IDX_S)); | |
63d7e66f | 3541 | req->tqp_type_and_id[i] = cpu_to_le16(tqp_type_and_id); |
46a3df9f S |
3542 | if (++i >= HCLGE_VECTOR_ELEMENTS_PER_CMD) { |
3543 | req->int_cause_num = HCLGE_VECTOR_ELEMENTS_PER_CMD; | |
63d7e66f | 3544 | req->vfid = vport->vport_id; |
46a3df9f | 3545 | |
63d7e66f SM |
3546 | status = hclge_cmd_send(&hdev->hw, &desc, 1); |
3547 | if (status) { | |
46a3df9f S |
3548 | dev_err(&hdev->pdev->dev, |
3549 | "Map TQP fail, status is %d.\n", | |
63d7e66f SM |
3550 | status); |
3551 | return -EIO; | |
46a3df9f S |
3552 | } |
3553 | i = 0; | |
3554 | ||
3555 | hclge_cmd_setup_basic_desc(&desc, | |
63d7e66f | 3556 | op, |
46a3df9f S |
3557 | false); |
3558 | req->int_vector_id = vector_id; | |
3559 | } | |
3560 | } | |
3561 | ||
3562 | if (i > 0) { | |
3563 | req->int_cause_num = i; | |
63d7e66f SM |
3564 | req->vfid = vport->vport_id; |
3565 | status = hclge_cmd_send(&hdev->hw, &desc, 1); | |
3566 | if (status) { | |
46a3df9f | 3567 | dev_err(&hdev->pdev->dev, |
63d7e66f SM |
3568 | "Map TQP fail, status is %d.\n", status); |
3569 | return -EIO; | |
46a3df9f S |
3570 | } |
3571 | } | |
3572 | ||
3573 | return 0; | |
3574 | } | |
3575 | ||
63d7e66f SM |
3576 | static int hclge_map_ring_to_vector(struct hnae3_handle *handle, |
3577 | int vector, | |
3578 | struct hnae3_ring_chain_node *ring_chain) | |
46a3df9f S |
3579 | { |
3580 | struct hclge_vport *vport = hclge_get_vport(handle); | |
3581 | struct hclge_dev *hdev = vport->back; | |
3582 | int vector_id; | |
3583 | ||
3584 | vector_id = hclge_get_vector_index(hdev, vector); | |
3585 | if (vector_id < 0) { | |
3586 | dev_err(&hdev->pdev->dev, | |
63d7e66f | 3587 | "Get vector index fail. vector_id =%d\n", vector_id); |
46a3df9f S |
3588 | return vector_id; |
3589 | } | |
3590 | ||
63d7e66f | 3591 | return hclge_bind_ring_with_vector(vport, vector_id, true, ring_chain); |
46a3df9f S |
3592 | } |
3593 | ||
63d7e66f SM |
3594 | static int hclge_unmap_ring_frm_vector(struct hnae3_handle *handle, |
3595 | int vector, | |
3596 | struct hnae3_ring_chain_node *ring_chain) | |
46a3df9f S |
3597 | { |
3598 | struct hclge_vport *vport = hclge_get_vport(handle); | |
3599 | struct hclge_dev *hdev = vport->back; | |
63d7e66f | 3600 | int vector_id, ret; |
46a3df9f | 3601 | |
f9637cc2 PL |
3602 | if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state)) |
3603 | return 0; | |
3604 | ||
46a3df9f S |
3605 | vector_id = hclge_get_vector_index(hdev, vector); |
3606 | if (vector_id < 0) { | |
3607 | dev_err(&handle->pdev->dev, | |
3608 | "Get vector index fail. ret =%d\n", vector_id); | |
3609 | return vector_id; | |
3610 | } | |
3611 | ||
63d7e66f | 3612 | ret = hclge_bind_ring_with_vector(vport, vector_id, false, ring_chain); |
7412200c | 3613 | if (ret) |
63d7e66f SM |
3614 | dev_err(&handle->pdev->dev, |
3615 | "Unmap ring from vector fail. vectorid=%d, ret =%d\n", | |
3616 | vector_id, | |
3617 | ret); | |
46a3df9f | 3618 | |
7412200c | 3619 | return ret; |
46a3df9f S |
3620 | } |
3621 | ||
3622 | int hclge_cmd_set_promisc_mode(struct hclge_dev *hdev, | |
3623 | struct hclge_promisc_param *param) | |
3624 | { | |
d44f9b63 | 3625 | struct hclge_promisc_cfg_cmd *req; |
46a3df9f S |
3626 | struct hclge_desc desc; |
3627 | int ret; | |
3628 | ||
3629 | hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_PROMISC_MODE, false); | |
3630 | ||
d44f9b63 | 3631 | req = (struct hclge_promisc_cfg_cmd *)desc.data; |
46a3df9f | 3632 | req->vf_id = param->vf_id; |
4771e104 PL |
3633 | |
3634 | /* HCLGE_PROMISC_TX_EN_B and HCLGE_PROMISC_RX_EN_B are not supported on | |
3635 | * pdev revision(0x20), new revision support them. The | |
3636 | * value of this two fields will not return error when driver | |
3637 | * send command to fireware in revision(0x20). | |
3638 | */ | |
3639 | req->flag = (param->enable << HCLGE_PROMISC_EN_B) | | |
3640 | HCLGE_PROMISC_TX_EN_B | HCLGE_PROMISC_RX_EN_B; | |
46a3df9f S |
3641 | |
3642 | ret = hclge_cmd_send(&hdev->hw, &desc, 1); | |
90415e85 | 3643 | if (ret) |
46a3df9f S |
3644 | dev_err(&hdev->pdev->dev, |
3645 | "Set promisc mode fail, status is %d.\n", ret); | |
90415e85 JS |
3646 | |
3647 | return ret; | |
46a3df9f S |
3648 | } |
3649 | ||
3650 | void hclge_promisc_param_init(struct hclge_promisc_param *param, bool en_uc, | |
3651 | bool en_mc, bool en_bc, int vport_id) | |
3652 | { | |
3653 | if (!param) | |
3654 | return; | |
3655 | ||
3656 | memset(param, 0, sizeof(struct hclge_promisc_param)); | |
3657 | if (en_uc) | |
3658 | param->enable = HCLGE_PROMISC_EN_UC; | |
3659 | if (en_mc) | |
3660 | param->enable |= HCLGE_PROMISC_EN_MC; | |
3661 | if (en_bc) | |
3662 | param->enable |= HCLGE_PROMISC_EN_BC; | |
3663 | param->vf_id = vport_id; | |
3664 | } | |
3665 | ||
abe62a63 HT |
3666 | static int hclge_set_promisc_mode(struct hnae3_handle *handle, bool en_uc_pmc, |
3667 | bool en_mc_pmc) | |
46a3df9f S |
3668 | { |
3669 | struct hclge_vport *vport = hclge_get_vport(handle); | |
3670 | struct hclge_dev *hdev = vport->back; | |
3671 | struct hclge_promisc_param param; | |
3672 | ||
e8600a3d PL |
3673 | hclge_promisc_param_init(¶m, en_uc_pmc, en_mc_pmc, true, |
3674 | vport->vport_id); | |
abe62a63 | 3675 | return hclge_cmd_set_promisc_mode(hdev, ¶m); |
46a3df9f S |
3676 | } |
3677 | ||
10a954bc JS |
3678 | static int hclge_get_fd_mode(struct hclge_dev *hdev, u8 *fd_mode) |
3679 | { | |
3680 | struct hclge_get_fd_mode_cmd *req; | |
3681 | struct hclge_desc desc; | |
3682 | int ret; | |
3683 | ||
3684 | hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_MODE_CTRL, true); | |
3685 | ||
3686 | req = (struct hclge_get_fd_mode_cmd *)desc.data; | |
3687 | ||
3688 | ret = hclge_cmd_send(&hdev->hw, &desc, 1); | |
3689 | if (ret) { | |
3690 | dev_err(&hdev->pdev->dev, "get fd mode fail, ret=%d\n", ret); | |
3691 | return ret; | |
3692 | } | |
3693 | ||
3694 | *fd_mode = req->mode; | |
3695 | ||
3696 | return ret; | |
3697 | } | |
3698 | ||
3699 | static int hclge_get_fd_allocation(struct hclge_dev *hdev, | |
3700 | u32 *stage1_entry_num, | |
3701 | u32 *stage2_entry_num, | |
3702 | u16 *stage1_counter_num, | |
3703 | u16 *stage2_counter_num) | |
3704 | { | |
3705 | struct hclge_get_fd_allocation_cmd *req; | |
3706 | struct hclge_desc desc; | |
3707 | int ret; | |
3708 | ||
3709 | hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_GET_ALLOCATION, true); | |
3710 | ||
3711 | req = (struct hclge_get_fd_allocation_cmd *)desc.data; | |
3712 | ||
3713 | ret = hclge_cmd_send(&hdev->hw, &desc, 1); | |
3714 | if (ret) { | |
3715 | dev_err(&hdev->pdev->dev, "query fd allocation fail, ret=%d\n", | |
3716 | ret); | |
3717 | return ret; | |
3718 | } | |
3719 | ||
3720 | *stage1_entry_num = le32_to_cpu(req->stage1_entry_num); | |
3721 | *stage2_entry_num = le32_to_cpu(req->stage2_entry_num); | |
3722 | *stage1_counter_num = le16_to_cpu(req->stage1_counter_num); | |
3723 | *stage2_counter_num = le16_to_cpu(req->stage2_counter_num); | |
3724 | ||
3725 | return ret; | |
3726 | } | |
3727 | ||
3728 | static int hclge_set_fd_key_config(struct hclge_dev *hdev, int stage_num) | |
3729 | { | |
3730 | struct hclge_set_fd_key_config_cmd *req; | |
3731 | struct hclge_fd_key_cfg *stage; | |
3732 | struct hclge_desc desc; | |
3733 | int ret; | |
3734 | ||
3735 | hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_KEY_CONFIG, false); | |
3736 | ||
3737 | req = (struct hclge_set_fd_key_config_cmd *)desc.data; | |
3738 | stage = &hdev->fd_cfg.key_cfg[stage_num]; | |
3739 | req->stage = stage_num; | |
3740 | req->key_select = stage->key_sel; | |
3741 | req->inner_sipv6_word_en = stage->inner_sipv6_word_en; | |
3742 | req->inner_dipv6_word_en = stage->inner_dipv6_word_en; | |
3743 | req->outer_sipv6_word_en = stage->outer_sipv6_word_en; | |
3744 | req->outer_dipv6_word_en = stage->outer_dipv6_word_en; | |
3745 | req->tuple_mask = cpu_to_le32(~stage->tuple_active); | |
3746 | req->meta_data_mask = cpu_to_le32(~stage->meta_data_active); | |
3747 | ||
3748 | ret = hclge_cmd_send(&hdev->hw, &desc, 1); | |
3749 | if (ret) | |
3750 | dev_err(&hdev->pdev->dev, "set fd key fail, ret=%d\n", ret); | |
3751 | ||
3752 | return ret; | |
3753 | } | |
3754 | ||
3755 | static int hclge_init_fd_config(struct hclge_dev *hdev) | |
3756 | { | |
3757 | #define LOW_2_WORDS 0x03 | |
3758 | struct hclge_fd_key_cfg *key_cfg; | |
3759 | int ret; | |
3760 | ||
3761 | if (!hnae3_dev_fd_supported(hdev)) | |
3762 | return 0; | |
3763 | ||
3764 | ret = hclge_get_fd_mode(hdev, &hdev->fd_cfg.fd_mode); | |
3765 | if (ret) | |
3766 | return ret; | |
3767 | ||
3768 | switch (hdev->fd_cfg.fd_mode) { | |
3769 | case HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1: | |
3770 | hdev->fd_cfg.max_key_length = MAX_KEY_LENGTH; | |
3771 | break; | |
3772 | case HCLGE_FD_MODE_DEPTH_4K_WIDTH_200B_STAGE_1: | |
3773 | hdev->fd_cfg.max_key_length = MAX_KEY_LENGTH / 2; | |
3774 | break; | |
3775 | default: | |
3776 | dev_err(&hdev->pdev->dev, | |
3777 | "Unsupported flow director mode %d\n", | |
3778 | hdev->fd_cfg.fd_mode); | |
3779 | return -EOPNOTSUPP; | |
3780 | } | |
3781 | ||
3782 | hdev->fd_cfg.fd_en = true; | |
3783 | hdev->fd_cfg.proto_support = | |
3784 | TCP_V4_FLOW | UDP_V4_FLOW | SCTP_V4_FLOW | TCP_V6_FLOW | | |
3785 | UDP_V6_FLOW | SCTP_V6_FLOW | IPV4_USER_FLOW | IPV6_USER_FLOW; | |
3786 | key_cfg = &hdev->fd_cfg.key_cfg[HCLGE_FD_STAGE_1]; | |
3787 | key_cfg->key_sel = HCLGE_FD_KEY_BASE_ON_TUPLE, | |
3788 | key_cfg->inner_sipv6_word_en = LOW_2_WORDS; | |
3789 | key_cfg->inner_dipv6_word_en = LOW_2_WORDS; | |
3790 | key_cfg->outer_sipv6_word_en = 0; | |
3791 | key_cfg->outer_dipv6_word_en = 0; | |
3792 | ||
3793 | key_cfg->tuple_active = BIT(INNER_VLAN_TAG_FST) | BIT(INNER_ETH_TYPE) | | |
3794 | BIT(INNER_IP_PROTO) | BIT(INNER_IP_TOS) | | |
3795 | BIT(INNER_SRC_IP) | BIT(INNER_DST_IP) | | |
3796 | BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT); | |
3797 | ||
3798 | /* If use max 400bit key, we can support tuples for ether type */ | |
3799 | if (hdev->fd_cfg.max_key_length == MAX_KEY_LENGTH) { | |
3800 | hdev->fd_cfg.proto_support |= ETHER_FLOW; | |
3801 | key_cfg->tuple_active |= | |
3802 | BIT(INNER_DST_MAC) | BIT(INNER_SRC_MAC); | |
3803 | } | |
3804 | ||
3805 | /* roce_type is used to filter roce frames | |
3806 | * dst_vport is used to specify the rule | |
3807 | */ | |
3808 | key_cfg->meta_data_active = BIT(ROCE_TYPE) | BIT(DST_VPORT); | |
3809 | ||
3810 | ret = hclge_get_fd_allocation(hdev, | |
3811 | &hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1], | |
3812 | &hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_2], | |
3813 | &hdev->fd_cfg.cnt_num[HCLGE_FD_STAGE_1], | |
3814 | &hdev->fd_cfg.cnt_num[HCLGE_FD_STAGE_2]); | |
3815 | if (ret) | |
3816 | return ret; | |
3817 | ||
3818 | return hclge_set_fd_key_config(hdev, HCLGE_FD_STAGE_1); | |
3819 | } | |
3820 | ||
7b829126 JS |
3821 | static int hclge_fd_tcam_config(struct hclge_dev *hdev, u8 stage, bool sel_x, |
3822 | int loc, u8 *key, bool is_add) | |
3823 | { | |
3824 | struct hclge_fd_tcam_config_1_cmd *req1; | |
3825 | struct hclge_fd_tcam_config_2_cmd *req2; | |
3826 | struct hclge_fd_tcam_config_3_cmd *req3; | |
3827 | struct hclge_desc desc[3]; | |
3828 | int ret; | |
3829 | ||
3830 | hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_FD_TCAM_OP, false); | |
3831 | desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT); | |
3832 | hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_FD_TCAM_OP, false); | |
3833 | desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT); | |
3834 | hclge_cmd_setup_basic_desc(&desc[2], HCLGE_OPC_FD_TCAM_OP, false); | |
3835 | ||
3836 | req1 = (struct hclge_fd_tcam_config_1_cmd *)desc[0].data; | |
3837 | req2 = (struct hclge_fd_tcam_config_2_cmd *)desc[1].data; | |
3838 | req3 = (struct hclge_fd_tcam_config_3_cmd *)desc[2].data; | |
3839 | ||
3840 | req1->stage = stage; | |
3841 | req1->xy_sel = sel_x ? 1 : 0; | |
3842 | hnae3_set_bit(req1->port_info, HCLGE_FD_EPORT_SW_EN_B, 0); | |
3843 | req1->index = cpu_to_le32(loc); | |
3844 | req1->entry_vld = sel_x ? is_add : 0; | |
3845 | ||
3846 | if (key) { | |
3847 | memcpy(req1->tcam_data, &key[0], sizeof(req1->tcam_data)); | |
3848 | memcpy(req2->tcam_data, &key[sizeof(req1->tcam_data)], | |
3849 | sizeof(req2->tcam_data)); | |
3850 | memcpy(req3->tcam_data, &key[sizeof(req1->tcam_data) + | |
3851 | sizeof(req2->tcam_data)], sizeof(req3->tcam_data)); | |
3852 | } | |
3853 | ||
3854 | ret = hclge_cmd_send(&hdev->hw, desc, 3); | |
3855 | if (ret) | |
3856 | dev_err(&hdev->pdev->dev, | |
3857 | "config tcam key fail, ret=%d\n", | |
3858 | ret); | |
3859 | ||
3860 | return ret; | |
3861 | } | |
3862 | ||
3863 | static int hclge_fd_ad_config(struct hclge_dev *hdev, u8 stage, int loc, | |
3864 | struct hclge_fd_ad_data *action) | |
3865 | { | |
3866 | struct hclge_fd_ad_config_cmd *req; | |
3867 | struct hclge_desc desc; | |
3868 | u64 ad_data = 0; | |
3869 | int ret; | |
3870 | ||
3871 | hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_AD_OP, false); | |
3872 | ||
3873 | req = (struct hclge_fd_ad_config_cmd *)desc.data; | |
3874 | req->index = cpu_to_le32(loc); | |
3875 | req->stage = stage; | |
3876 | ||
3877 | hnae3_set_bit(ad_data, HCLGE_FD_AD_WR_RULE_ID_B, | |
3878 | action->write_rule_id_to_bd); | |
3879 | hnae3_set_field(ad_data, HCLGE_FD_AD_RULE_ID_M, HCLGE_FD_AD_RULE_ID_S, | |
3880 | action->rule_id); | |
3881 | ad_data <<= 32; | |
3882 | hnae3_set_bit(ad_data, HCLGE_FD_AD_DROP_B, action->drop_packet); | |
3883 | hnae3_set_bit(ad_data, HCLGE_FD_AD_DIRECT_QID_B, | |
3884 | action->forward_to_direct_queue); | |
3885 | hnae3_set_field(ad_data, HCLGE_FD_AD_QID_M, HCLGE_FD_AD_QID_S, | |
3886 | action->queue_id); | |
3887 | hnae3_set_bit(ad_data, HCLGE_FD_AD_USE_COUNTER_B, action->use_counter); | |
3888 | hnae3_set_field(ad_data, HCLGE_FD_AD_COUNTER_NUM_M, | |
3889 | HCLGE_FD_AD_COUNTER_NUM_S, action->counter_id); | |
3890 | hnae3_set_bit(ad_data, HCLGE_FD_AD_NXT_STEP_B, action->use_next_stage); | |
3891 | hnae3_set_field(ad_data, HCLGE_FD_AD_NXT_KEY_M, HCLGE_FD_AD_NXT_KEY_S, | |
3892 | action->counter_id); | |
3893 | ||
3894 | req->ad_data = cpu_to_le64(ad_data); | |
3895 | ret = hclge_cmd_send(&hdev->hw, &desc, 1); | |
3896 | if (ret) | |
3897 | dev_err(&hdev->pdev->dev, "fd ad config fail, ret=%d\n", ret); | |
3898 | ||
3899 | return ret; | |
3900 | } | |
3901 | ||
3902 | static bool hclge_fd_convert_tuple(u32 tuple_bit, u8 *key_x, u8 *key_y, | |
3903 | struct hclge_fd_rule *rule) | |
3904 | { | |
3905 | u16 tmp_x_s, tmp_y_s; | |
3906 | u32 tmp_x_l, tmp_y_l; | |
3907 | int i; | |
3908 | ||
3909 | if (rule->unused_tuple & tuple_bit) | |
3910 | return true; | |
3911 | ||
3912 | switch (tuple_bit) { | |
3913 | case 0: | |
3914 | return false; | |
3915 | case BIT(INNER_DST_MAC): | |
3916 | for (i = 0; i < 6; i++) { | |
3917 | calc_x(key_x[5 - i], rule->tuples.dst_mac[i], | |
3918 | rule->tuples_mask.dst_mac[i]); | |
3919 | calc_y(key_y[5 - i], rule->tuples.dst_mac[i], | |
3920 | rule->tuples_mask.dst_mac[i]); | |
3921 | } | |
3922 | ||
3923 | return true; | |
3924 | case BIT(INNER_SRC_MAC): | |
3925 | for (i = 0; i < 6; i++) { | |
3926 | calc_x(key_x[5 - i], rule->tuples.src_mac[i], | |
3927 | rule->tuples.src_mac[i]); | |
3928 | calc_y(key_y[5 - i], rule->tuples.src_mac[i], | |
3929 | rule->tuples.src_mac[i]); | |
3930 | } | |
3931 | ||
3932 | return true; | |
3933 | case BIT(INNER_VLAN_TAG_FST): | |
3934 | calc_x(tmp_x_s, rule->tuples.vlan_tag1, | |
3935 | rule->tuples_mask.vlan_tag1); | |
3936 | calc_y(tmp_y_s, rule->tuples.vlan_tag1, | |
3937 | rule->tuples_mask.vlan_tag1); | |
3938 | *(__le16 *)key_x = cpu_to_le16(tmp_x_s); | |
3939 | *(__le16 *)key_y = cpu_to_le16(tmp_y_s); | |
3940 | ||
3941 | return true; | |
3942 | case BIT(INNER_ETH_TYPE): | |
3943 | calc_x(tmp_x_s, rule->tuples.ether_proto, | |
3944 | rule->tuples_mask.ether_proto); | |
3945 | calc_y(tmp_y_s, rule->tuples.ether_proto, | |
3946 | rule->tuples_mask.ether_proto); | |
3947 | *(__le16 *)key_x = cpu_to_le16(tmp_x_s); | |
3948 | *(__le16 *)key_y = cpu_to_le16(tmp_y_s); | |
3949 | ||
3950 | return true; | |
3951 | case BIT(INNER_IP_TOS): | |
3952 | calc_x(*key_x, rule->tuples.ip_tos, rule->tuples_mask.ip_tos); | |
3953 | calc_y(*key_y, rule->tuples.ip_tos, rule->tuples_mask.ip_tos); | |
3954 | ||
3955 | return true; | |
3956 | case BIT(INNER_IP_PROTO): | |
3957 | calc_x(*key_x, rule->tuples.ip_proto, | |
3958 | rule->tuples_mask.ip_proto); | |
3959 | calc_y(*key_y, rule->tuples.ip_proto, | |
3960 | rule->tuples_mask.ip_proto); | |
3961 | ||
3962 | return true; | |
3963 | case BIT(INNER_SRC_IP): | |
3964 | calc_x(tmp_x_l, rule->tuples.src_ip[3], | |
3965 | rule->tuples_mask.src_ip[3]); | |
3966 | calc_y(tmp_y_l, rule->tuples.src_ip[3], | |
3967 | rule->tuples_mask.src_ip[3]); | |
3968 | *(__le32 *)key_x = cpu_to_le32(tmp_x_l); | |
3969 | *(__le32 *)key_y = cpu_to_le32(tmp_y_l); | |
3970 | ||
3971 | return true; | |
3972 | case BIT(INNER_DST_IP): | |
3973 | calc_x(tmp_x_l, rule->tuples.dst_ip[3], | |
3974 | rule->tuples_mask.dst_ip[3]); | |
3975 | calc_y(tmp_y_l, rule->tuples.dst_ip[3], | |
3976 | rule->tuples_mask.dst_ip[3]); | |
3977 | *(__le32 *)key_x = cpu_to_le32(tmp_x_l); | |
3978 | *(__le32 *)key_y = cpu_to_le32(tmp_y_l); | |
3979 | ||
3980 | return true; | |
3981 | case BIT(INNER_SRC_PORT): | |
3982 | calc_x(tmp_x_s, rule->tuples.src_port, | |
3983 | rule->tuples_mask.src_port); | |
3984 | calc_y(tmp_y_s, rule->tuples.src_port, | |
3985 | rule->tuples_mask.src_port); | |
3986 | *(__le16 *)key_x = cpu_to_le16(tmp_x_s); | |
3987 | *(__le16 *)key_y = cpu_to_le16(tmp_y_s); | |
3988 | ||
3989 | return true; | |
3990 | case BIT(INNER_DST_PORT): | |
3991 | calc_x(tmp_x_s, rule->tuples.dst_port, | |
3992 | rule->tuples_mask.dst_port); | |
3993 | calc_y(tmp_y_s, rule->tuples.dst_port, | |
3994 | rule->tuples_mask.dst_port); | |
3995 | *(__le16 *)key_x = cpu_to_le16(tmp_x_s); | |
3996 | *(__le16 *)key_y = cpu_to_le16(tmp_y_s); | |
3997 | ||
3998 | return true; | |
3999 | default: | |
4000 | return false; | |
4001 | } | |
4002 | } | |
4003 | ||
4004 | static u32 hclge_get_port_number(enum HLCGE_PORT_TYPE port_type, u8 pf_id, | |
4005 | u8 vf_id, u8 network_port_id) | |
4006 | { | |
4007 | u32 port_number = 0; | |
4008 | ||
4009 | if (port_type == HOST_PORT) { | |
4010 | hnae3_set_field(port_number, HCLGE_PF_ID_M, HCLGE_PF_ID_S, | |
4011 | pf_id); | |
4012 | hnae3_set_field(port_number, HCLGE_VF_ID_M, HCLGE_VF_ID_S, | |
4013 | vf_id); | |
4014 | hnae3_set_bit(port_number, HCLGE_PORT_TYPE_B, HOST_PORT); | |
4015 | } else { | |
4016 | hnae3_set_field(port_number, HCLGE_NETWORK_PORT_ID_M, | |
4017 | HCLGE_NETWORK_PORT_ID_S, network_port_id); | |
4018 | hnae3_set_bit(port_number, HCLGE_PORT_TYPE_B, NETWORK_PORT); | |
4019 | } | |
4020 | ||
4021 | return port_number; | |
4022 | } | |
4023 | ||
4024 | static void hclge_fd_convert_meta_data(struct hclge_fd_key_cfg *key_cfg, | |
4025 | __le32 *key_x, __le32 *key_y, | |
4026 | struct hclge_fd_rule *rule) | |
4027 | { | |
4028 | u32 tuple_bit, meta_data = 0, tmp_x, tmp_y, port_number; | |
4029 | u8 cur_pos = 0, tuple_size, shift_bits; | |
4030 | int i; | |
4031 | ||
4032 | for (i = 0; i < MAX_META_DATA; i++) { | |
4033 | tuple_size = meta_data_key_info[i].key_length; | |
4034 | tuple_bit = key_cfg->meta_data_active & BIT(i); | |
4035 | ||
4036 | switch (tuple_bit) { | |
4037 | case BIT(ROCE_TYPE): | |
4038 | hnae3_set_bit(meta_data, cur_pos, NIC_PACKET); | |
4039 | cur_pos += tuple_size; | |
4040 | break; | |
4041 | case BIT(DST_VPORT): | |
4042 | port_number = hclge_get_port_number(HOST_PORT, 0, | |
4043 | rule->vf_id, 0); | |
4044 | hnae3_set_field(meta_data, | |
4045 | GENMASK(cur_pos + tuple_size, cur_pos), | |
4046 | cur_pos, port_number); | |
4047 | cur_pos += tuple_size; | |
4048 | break; | |
4049 | default: | |
4050 | break; | |
4051 | } | |
4052 | } | |
4053 | ||
4054 | calc_x(tmp_x, meta_data, 0xFFFFFFFF); | |
4055 | calc_y(tmp_y, meta_data, 0xFFFFFFFF); | |
4056 | shift_bits = sizeof(meta_data) * 8 - cur_pos; | |
4057 | ||
4058 | *key_x = cpu_to_le32(tmp_x << shift_bits); | |
4059 | *key_y = cpu_to_le32(tmp_y << shift_bits); | |
4060 | } | |
4061 | ||
4062 | /* A complete key is combined with meta data key and tuple key. | |
4063 | * Meta data key is stored at the MSB region, and tuple key is stored at | |
4064 | * the LSB region, unused bits will be filled 0. | |
4065 | */ | |
4066 | static int hclge_config_key(struct hclge_dev *hdev, u8 stage, | |
4067 | struct hclge_fd_rule *rule) | |
4068 | { | |
4069 | struct hclge_fd_key_cfg *key_cfg = &hdev->fd_cfg.key_cfg[stage]; | |
4070 | u8 key_x[MAX_KEY_BYTES], key_y[MAX_KEY_BYTES]; | |
4071 | u8 *cur_key_x, *cur_key_y; | |
4072 | int i, ret, tuple_size; | |
4073 | u8 meta_data_region; | |
4074 | ||
4075 | memset(key_x, 0, sizeof(key_x)); | |
4076 | memset(key_y, 0, sizeof(key_y)); | |
4077 | cur_key_x = key_x; | |
4078 | cur_key_y = key_y; | |
4079 | ||
4080 | for (i = 0 ; i < MAX_TUPLE; i++) { | |
4081 | bool tuple_valid; | |
4082 | u32 check_tuple; | |
4083 | ||
4084 | tuple_size = tuple_key_info[i].key_length / 8; | |
4085 | check_tuple = key_cfg->tuple_active & BIT(i); | |
4086 | ||
4087 | tuple_valid = hclge_fd_convert_tuple(check_tuple, cur_key_x, | |
4088 | cur_key_y, rule); | |
4089 | if (tuple_valid) { | |
4090 | cur_key_x += tuple_size; | |
4091 | cur_key_y += tuple_size; | |
4092 | } | |
4093 | } | |
4094 | ||
4095 | meta_data_region = hdev->fd_cfg.max_key_length / 8 - | |
4096 | MAX_META_DATA_LENGTH / 8; | |
4097 | ||
4098 | hclge_fd_convert_meta_data(key_cfg, | |
4099 | (__le32 *)(key_x + meta_data_region), | |
4100 | (__le32 *)(key_y + meta_data_region), | |
4101 | rule); | |
4102 | ||
4103 | ret = hclge_fd_tcam_config(hdev, stage, false, rule->location, key_y, | |
4104 | true); | |
4105 | if (ret) { | |
4106 | dev_err(&hdev->pdev->dev, | |
4107 | "fd key_y config fail, loc=%d, ret=%d\n", | |
4108 | rule->queue_id, ret); | |
4109 | return ret; | |
4110 | } | |
4111 | ||
4112 | ret = hclge_fd_tcam_config(hdev, stage, true, rule->location, key_x, | |
4113 | true); | |
4114 | if (ret) | |
4115 | dev_err(&hdev->pdev->dev, | |
4116 | "fd key_x config fail, loc=%d, ret=%d\n", | |
4117 | rule->queue_id, ret); | |
4118 | return ret; | |
4119 | } | |
4120 | ||
4121 | static int hclge_config_action(struct hclge_dev *hdev, u8 stage, | |
4122 | struct hclge_fd_rule *rule) | |
4123 | { | |
4124 | struct hclge_fd_ad_data ad_data; | |
4125 | ||
4126 | ad_data.ad_id = rule->location; | |
4127 | ||
4128 | if (rule->action == HCLGE_FD_ACTION_DROP_PACKET) { | |
4129 | ad_data.drop_packet = true; | |
4130 | ad_data.forward_to_direct_queue = false; | |
4131 | ad_data.queue_id = 0; | |
4132 | } else { | |
4133 | ad_data.drop_packet = false; | |
4134 | ad_data.forward_to_direct_queue = true; | |
4135 | ad_data.queue_id = rule->queue_id; | |
4136 | } | |
4137 | ||
4138 | ad_data.use_counter = false; | |
4139 | ad_data.counter_id = 0; | |
4140 | ||
4141 | ad_data.use_next_stage = false; | |
4142 | ad_data.next_input_key = 0; | |
4143 | ||
4144 | ad_data.write_rule_id_to_bd = true; | |
4145 | ad_data.rule_id = rule->location; | |
4146 | ||
4147 | return hclge_fd_ad_config(hdev, stage, ad_data.ad_id, &ad_data); | |
4148 | } | |
4149 | ||
3ca8e27c JS |
4150 | static int hclge_fd_check_spec(struct hclge_dev *hdev, |
4151 | struct ethtool_rx_flow_spec *fs, u32 *unused) | |
4152 | { | |
4153 | struct ethtool_tcpip4_spec *tcp_ip4_spec; | |
4154 | struct ethtool_usrip4_spec *usr_ip4_spec; | |
4155 | struct ethtool_tcpip6_spec *tcp_ip6_spec; | |
4156 | struct ethtool_usrip6_spec *usr_ip6_spec; | |
4157 | struct ethhdr *ether_spec; | |
4158 | ||
4159 | if (fs->location >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]) | |
4160 | return -EINVAL; | |
4161 | ||
4162 | if (!(fs->flow_type & hdev->fd_cfg.proto_support)) | |
4163 | return -EOPNOTSUPP; | |
4164 | ||
4165 | if ((fs->flow_type & FLOW_EXT) && | |
4166 | (fs->h_ext.data[0] != 0 || fs->h_ext.data[1] != 0)) { | |
4167 | dev_err(&hdev->pdev->dev, "user-def bytes are not supported\n"); | |
4168 | return -EOPNOTSUPP; | |
4169 | } | |
4170 | ||
4171 | switch (fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) { | |
4172 | case SCTP_V4_FLOW: | |
4173 | case TCP_V4_FLOW: | |
4174 | case UDP_V4_FLOW: | |
4175 | tcp_ip4_spec = &fs->h_u.tcp_ip4_spec; | |
4176 | *unused |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC); | |
4177 | ||
4178 | if (!tcp_ip4_spec->ip4src) | |
4179 | *unused |= BIT(INNER_SRC_IP); | |
4180 | ||
4181 | if (!tcp_ip4_spec->ip4dst) | |
4182 | *unused |= BIT(INNER_DST_IP); | |
4183 | ||
4184 | if (!tcp_ip4_spec->psrc) | |
4185 | *unused |= BIT(INNER_SRC_PORT); | |
4186 | ||
4187 | if (!tcp_ip4_spec->pdst) | |
4188 | *unused |= BIT(INNER_DST_PORT); | |
4189 | ||
4190 | if (!tcp_ip4_spec->tos) | |
4191 | *unused |= BIT(INNER_IP_TOS); | |
4192 | ||
4193 | break; | |
4194 | case IP_USER_FLOW: | |
4195 | usr_ip4_spec = &fs->h_u.usr_ip4_spec; | |
4196 | *unused |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) | | |
4197 | BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT); | |
4198 | ||
4199 | if (!usr_ip4_spec->ip4src) | |
4200 | *unused |= BIT(INNER_SRC_IP); | |
4201 | ||
4202 | if (!usr_ip4_spec->ip4dst) | |
4203 | *unused |= BIT(INNER_DST_IP); | |
4204 | ||
4205 | if (!usr_ip4_spec->tos) | |
4206 | *unused |= BIT(INNER_IP_TOS); | |
4207 | ||
4208 | if (!usr_ip4_spec->proto) | |
4209 | *unused |= BIT(INNER_IP_PROTO); | |
4210 | ||
4211 | if (usr_ip4_spec->l4_4_bytes) | |
4212 | return -EOPNOTSUPP; | |
4213 | ||
4214 | if (usr_ip4_spec->ip_ver != ETH_RX_NFC_IP4) | |
4215 | return -EOPNOTSUPP; | |
4216 | ||
4217 | break; | |
4218 | case SCTP_V6_FLOW: | |
4219 | case TCP_V6_FLOW: | |
4220 | case UDP_V6_FLOW: | |
4221 | tcp_ip6_spec = &fs->h_u.tcp_ip6_spec; | |
4222 | *unused |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) | | |
4223 | BIT(INNER_IP_TOS); | |
4224 | ||
4225 | if (!tcp_ip6_spec->ip6src[0] && !tcp_ip6_spec->ip6src[1] && | |
4226 | !tcp_ip6_spec->ip6src[2] && !tcp_ip6_spec->ip6src[3]) | |
4227 | *unused |= BIT(INNER_SRC_IP); | |
4228 | ||
4229 | if (!tcp_ip6_spec->ip6dst[0] && !tcp_ip6_spec->ip6dst[1] && | |
4230 | !tcp_ip6_spec->ip6dst[2] && !tcp_ip6_spec->ip6dst[3]) | |
4231 | *unused |= BIT(INNER_DST_IP); | |
4232 | ||
4233 | if (!tcp_ip6_spec->psrc) | |
4234 | *unused |= BIT(INNER_SRC_PORT); | |
4235 | ||
4236 | if (!tcp_ip6_spec->pdst) | |
4237 | *unused |= BIT(INNER_DST_PORT); | |
4238 | ||
4239 | if (tcp_ip6_spec->tclass) | |
4240 | return -EOPNOTSUPP; | |
4241 | ||
4242 | break; | |
4243 | case IPV6_USER_FLOW: | |
4244 | usr_ip6_spec = &fs->h_u.usr_ip6_spec; | |
4245 | *unused |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) | | |
4246 | BIT(INNER_IP_TOS) | BIT(INNER_SRC_PORT) | | |
4247 | BIT(INNER_DST_PORT); | |
4248 | ||
4249 | if (!usr_ip6_spec->ip6src[0] && !usr_ip6_spec->ip6src[1] && | |
4250 | !usr_ip6_spec->ip6src[2] && !usr_ip6_spec->ip6src[3]) | |
4251 | *unused |= BIT(INNER_SRC_IP); | |
4252 | ||
4253 | if (!usr_ip6_spec->ip6dst[0] && !usr_ip6_spec->ip6dst[1] && | |
4254 | !usr_ip6_spec->ip6dst[2] && !usr_ip6_spec->ip6dst[3]) | |
4255 | *unused |= BIT(INNER_DST_IP); | |
4256 | ||
4257 | if (!usr_ip6_spec->l4_proto) | |
4258 | *unused |= BIT(INNER_IP_PROTO); | |
4259 | ||
4260 | if (usr_ip6_spec->tclass) | |
4261 | return -EOPNOTSUPP; | |
4262 | ||
4263 | if (usr_ip6_spec->l4_4_bytes) | |
4264 | return -EOPNOTSUPP; | |
4265 | ||
4266 | break; | |
4267 | case ETHER_FLOW: | |
4268 | ether_spec = &fs->h_u.ether_spec; | |
4269 | *unused |= BIT(INNER_SRC_IP) | BIT(INNER_DST_IP) | | |
4270 | BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT) | | |
4271 | BIT(INNER_IP_TOS) | BIT(INNER_IP_PROTO); | |
4272 | ||
4273 | if (is_zero_ether_addr(ether_spec->h_source)) | |
4274 | *unused |= BIT(INNER_SRC_MAC); | |
4275 | ||
4276 | if (is_zero_ether_addr(ether_spec->h_dest)) | |
4277 | *unused |= BIT(INNER_DST_MAC); | |
4278 | ||
4279 | if (!ether_spec->h_proto) | |
4280 | *unused |= BIT(INNER_ETH_TYPE); | |
4281 | ||
4282 | break; | |
4283 | default: | |
4284 | return -EOPNOTSUPP; | |
4285 | } | |
4286 | ||
4287 | if ((fs->flow_type & FLOW_EXT)) { | |
4288 | if (fs->h_ext.vlan_etype) | |
4289 | return -EOPNOTSUPP; | |
4290 | if (!fs->h_ext.vlan_tci) | |
4291 | *unused |= BIT(INNER_VLAN_TAG_FST); | |
4292 | ||
4293 | if (fs->m_ext.vlan_tci) { | |
4294 | if (be16_to_cpu(fs->h_ext.vlan_tci) >= VLAN_N_VID) | |
4295 | return -EINVAL; | |
4296 | } | |
4297 | } else { | |
4298 | *unused |= BIT(INNER_VLAN_TAG_FST); | |
4299 | } | |
4300 | ||
4301 | if (fs->flow_type & FLOW_MAC_EXT) { | |
4302 | if (!(hdev->fd_cfg.proto_support & ETHER_FLOW)) | |
4303 | return -EOPNOTSUPP; | |
4304 | ||
4305 | if (is_zero_ether_addr(fs->h_ext.h_dest)) | |
4306 | *unused |= BIT(INNER_DST_MAC); | |
4307 | else | |
4308 | *unused &= ~(BIT(INNER_DST_MAC)); | |
4309 | } | |
4310 | ||
4311 | return 0; | |
4312 | } | |
4313 | ||
4314 | static bool hclge_fd_rule_exist(struct hclge_dev *hdev, u16 location) | |
4315 | { | |
4316 | struct hclge_fd_rule *rule = NULL; | |
4317 | struct hlist_node *node2; | |
4318 | ||
4319 | hlist_for_each_entry_safe(rule, node2, &hdev->fd_rule_list, rule_node) { | |
4320 | if (rule->location >= location) | |
4321 | break; | |
4322 | } | |
4323 | ||
4324 | return rule && rule->location == location; | |
4325 | } | |
4326 | ||
4327 | static int hclge_fd_update_rule_list(struct hclge_dev *hdev, | |
4328 | struct hclge_fd_rule *new_rule, | |
4329 | u16 location, | |
4330 | bool is_add) | |
4331 | { | |
4332 | struct hclge_fd_rule *rule = NULL, *parent = NULL; | |
4333 | struct hlist_node *node2; | |
4334 | ||
4335 | if (is_add && !new_rule) | |
4336 | return -EINVAL; | |
4337 | ||
4338 | hlist_for_each_entry_safe(rule, node2, | |
4339 | &hdev->fd_rule_list, rule_node) { | |
4340 | if (rule->location >= location) | |
4341 | break; | |
4342 | parent = rule; | |
4343 | } | |
4344 | ||
4345 | if (rule && rule->location == location) { | |
4346 | hlist_del(&rule->rule_node); | |
4347 | kfree(rule); | |
4348 | hdev->hclge_fd_rule_num--; | |
4349 | ||
4350 | if (!is_add) | |
4351 | return 0; | |
4352 | ||
4353 | } else if (!is_add) { | |
4354 | dev_err(&hdev->pdev->dev, | |
4355 | "delete fail, rule %d is inexistent\n", | |
4356 | location); | |
4357 | return -EINVAL; | |
4358 | } | |
4359 | ||
4360 | INIT_HLIST_NODE(&new_rule->rule_node); | |
4361 | ||
4362 | if (parent) | |
4363 | hlist_add_behind(&new_rule->rule_node, &parent->rule_node); | |
4364 | else | |
4365 | hlist_add_head(&new_rule->rule_node, &hdev->fd_rule_list); | |
4366 | ||
4367 | hdev->hclge_fd_rule_num++; | |
4368 | ||
4369 | return 0; | |
4370 | } | |
4371 | ||
4372 | static int hclge_fd_get_tuple(struct hclge_dev *hdev, | |
4373 | struct ethtool_rx_flow_spec *fs, | |
4374 | struct hclge_fd_rule *rule) | |
4375 | { | |
4376 | u32 flow_type = fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT); | |
4377 | ||
4378 | switch (flow_type) { | |
4379 | case SCTP_V4_FLOW: | |
4380 | case TCP_V4_FLOW: | |
4381 | case UDP_V4_FLOW: | |
4382 | rule->tuples.src_ip[3] = | |
4383 | be32_to_cpu(fs->h_u.tcp_ip4_spec.ip4src); | |
4384 | rule->tuples_mask.src_ip[3] = | |
4385 | be32_to_cpu(fs->m_u.tcp_ip4_spec.ip4src); | |
4386 | ||
4387 | rule->tuples.dst_ip[3] = | |
4388 | be32_to_cpu(fs->h_u.tcp_ip4_spec.ip4dst); | |
4389 | rule->tuples_mask.dst_ip[3] = | |
4390 | be32_to_cpu(fs->m_u.tcp_ip4_spec.ip4dst); | |
4391 | ||
4392 | rule->tuples.src_port = be16_to_cpu(fs->h_u.tcp_ip4_spec.psrc); | |
4393 | rule->tuples_mask.src_port = | |
4394 | be16_to_cpu(fs->m_u.tcp_ip4_spec.psrc); | |
4395 | ||
4396 | rule->tuples.dst_port = be16_to_cpu(fs->h_u.tcp_ip4_spec.pdst); | |
4397 | rule->tuples_mask.dst_port = | |
4398 | be16_to_cpu(fs->m_u.tcp_ip4_spec.pdst); | |
4399 | ||
4400 | rule->tuples.ip_tos = fs->h_u.tcp_ip4_spec.tos; | |
4401 | rule->tuples_mask.ip_tos = fs->m_u.tcp_ip4_spec.tos; | |
4402 | ||
4403 | rule->tuples.ether_proto = ETH_P_IP; | |
4404 | rule->tuples_mask.ether_proto = 0xFFFF; | |
4405 | ||
4406 | break; | |
4407 | case IP_USER_FLOW: | |
4408 | rule->tuples.src_ip[3] = | |
4409 | be32_to_cpu(fs->h_u.usr_ip4_spec.ip4src); | |
4410 | rule->tuples_mask.src_ip[3] = | |
4411 | be32_to_cpu(fs->m_u.usr_ip4_spec.ip4src); | |
4412 | ||
4413 | rule->tuples.dst_ip[3] = | |
4414 | be32_to_cpu(fs->h_u.usr_ip4_spec.ip4dst); | |
4415 | rule->tuples_mask.dst_ip[3] = | |
4416 | be32_to_cpu(fs->m_u.usr_ip4_spec.ip4dst); | |
4417 | ||
4418 | rule->tuples.ip_tos = fs->h_u.usr_ip4_spec.tos; | |
4419 | rule->tuples_mask.ip_tos = fs->m_u.usr_ip4_spec.tos; | |
4420 | ||
4421 | rule->tuples.ip_proto = fs->h_u.usr_ip4_spec.proto; | |
4422 | rule->tuples_mask.ip_proto = fs->m_u.usr_ip4_spec.proto; | |
4423 | ||
4424 | rule->tuples.ether_proto = ETH_P_IP; | |
4425 | rule->tuples_mask.ether_proto = 0xFFFF; | |
4426 | ||
4427 | break; | |
4428 | case SCTP_V6_FLOW: | |
4429 | case TCP_V6_FLOW: | |
4430 | case UDP_V6_FLOW: | |
4431 | be32_to_cpu_array(rule->tuples.src_ip, | |
4432 | fs->h_u.tcp_ip6_spec.ip6src, 4); | |
4433 | be32_to_cpu_array(rule->tuples_mask.src_ip, | |
4434 | fs->m_u.tcp_ip6_spec.ip6src, 4); | |
4435 | ||
4436 | be32_to_cpu_array(rule->tuples.dst_ip, | |
4437 | fs->h_u.tcp_ip6_spec.ip6dst, 4); | |
4438 | be32_to_cpu_array(rule->tuples_mask.dst_ip, | |
4439 | fs->m_u.tcp_ip6_spec.ip6dst, 4); | |
4440 | ||
4441 | rule->tuples.src_port = be16_to_cpu(fs->h_u.tcp_ip6_spec.psrc); | |
4442 | rule->tuples_mask.src_port = | |
4443 | be16_to_cpu(fs->m_u.tcp_ip6_spec.psrc); | |
4444 | ||
4445 | rule->tuples.dst_port = be16_to_cpu(fs->h_u.tcp_ip6_spec.pdst); | |
4446 | rule->tuples_mask.dst_port = | |
4447 | be16_to_cpu(fs->m_u.tcp_ip6_spec.pdst); | |
4448 | ||
4449 | rule->tuples.ether_proto = ETH_P_IPV6; | |
4450 | rule->tuples_mask.ether_proto = 0xFFFF; | |
4451 | ||
4452 | break; | |
4453 | case IPV6_USER_FLOW: | |
4454 | be32_to_cpu_array(rule->tuples.src_ip, | |
4455 | fs->h_u.usr_ip6_spec.ip6src, 4); | |
4456 | be32_to_cpu_array(rule->tuples_mask.src_ip, | |
4457 | fs->m_u.usr_ip6_spec.ip6src, 4); | |
4458 | ||
4459 | be32_to_cpu_array(rule->tuples.dst_ip, | |
4460 | fs->h_u.usr_ip6_spec.ip6dst, 4); | |
4461 | be32_to_cpu_array(rule->tuples_mask.dst_ip, | |
4462 | fs->m_u.usr_ip6_spec.ip6dst, 4); | |
4463 | ||
4464 | rule->tuples.ip_proto = fs->h_u.usr_ip6_spec.l4_proto; | |
4465 | rule->tuples_mask.ip_proto = fs->m_u.usr_ip6_spec.l4_proto; | |
4466 | ||
4467 | rule->tuples.ether_proto = ETH_P_IPV6; | |
4468 | rule->tuples_mask.ether_proto = 0xFFFF; | |
4469 | ||
4470 | break; | |
4471 | case ETHER_FLOW: | |
4472 | ether_addr_copy(rule->tuples.src_mac, | |
4473 | fs->h_u.ether_spec.h_source); | |
4474 | ether_addr_copy(rule->tuples_mask.src_mac, | |
4475 | fs->m_u.ether_spec.h_source); | |
4476 | ||
4477 | ether_addr_copy(rule->tuples.dst_mac, | |
4478 | fs->h_u.ether_spec.h_dest); | |
4479 | ether_addr_copy(rule->tuples_mask.dst_mac, | |
4480 | fs->m_u.ether_spec.h_dest); | |
4481 | ||
4482 | rule->tuples.ether_proto = | |
4483 | be16_to_cpu(fs->h_u.ether_spec.h_proto); | |
4484 | rule->tuples_mask.ether_proto = | |
4485 | be16_to_cpu(fs->m_u.ether_spec.h_proto); | |
4486 | ||
4487 | break; | |
4488 | default: | |
4489 | return -EOPNOTSUPP; | |
4490 | } | |
4491 | ||
4492 | switch (flow_type) { | |
4493 | case SCTP_V4_FLOW: | |
4494 | case SCTP_V6_FLOW: | |
4495 | rule->tuples.ip_proto = IPPROTO_SCTP; | |
4496 | rule->tuples_mask.ip_proto = 0xFF; | |
4497 | break; | |
4498 | case TCP_V4_FLOW: | |
4499 | case TCP_V6_FLOW: | |
4500 | rule->tuples.ip_proto = IPPROTO_TCP; | |
4501 | rule->tuples_mask.ip_proto = 0xFF; | |
4502 | break; | |
4503 | case UDP_V4_FLOW: | |
4504 | case UDP_V6_FLOW: | |
4505 | rule->tuples.ip_proto = IPPROTO_UDP; | |
4506 | rule->tuples_mask.ip_proto = 0xFF; | |
4507 | break; | |
4508 | default: | |
4509 | break; | |
4510 | } | |
4511 | ||
4512 | if ((fs->flow_type & FLOW_EXT)) { | |
4513 | rule->tuples.vlan_tag1 = be16_to_cpu(fs->h_ext.vlan_tci); | |
4514 | rule->tuples_mask.vlan_tag1 = be16_to_cpu(fs->m_ext.vlan_tci); | |
4515 | } | |
4516 | ||
4517 | if (fs->flow_type & FLOW_MAC_EXT) { | |
4518 | ether_addr_copy(rule->tuples.dst_mac, fs->h_ext.h_dest); | |
4519 | ether_addr_copy(rule->tuples_mask.dst_mac, fs->m_ext.h_dest); | |
4520 | } | |
4521 | ||
4522 | return 0; | |
4523 | } | |
4524 | ||
4525 | static int hclge_add_fd_entry(struct hnae3_handle *handle, | |
4526 | struct ethtool_rxnfc *cmd) | |
4527 | { | |
4528 | struct hclge_vport *vport = hclge_get_vport(handle); | |
4529 | struct hclge_dev *hdev = vport->back; | |
4530 | u16 dst_vport_id = 0, q_index = 0; | |
4531 | struct ethtool_rx_flow_spec *fs; | |
4532 | struct hclge_fd_rule *rule; | |
4533 | u32 unused = 0; | |
4534 | u8 action; | |
4535 | int ret; | |
4536 | ||
4537 | if (!hnae3_dev_fd_supported(hdev)) | |
4538 | return -EOPNOTSUPP; | |
4539 | ||
4540 | if (!hdev->fd_cfg.fd_en) { | |
4541 | dev_warn(&hdev->pdev->dev, | |
4542 | "Please enable flow director first\n"); | |
4543 | return -EOPNOTSUPP; | |
4544 | } | |
4545 | ||
4546 | fs = (struct ethtool_rx_flow_spec *)&cmd->fs; | |
4547 | ||
4548 | ret = hclge_fd_check_spec(hdev, fs, &unused); | |
4549 | if (ret) { | |
4550 | dev_err(&hdev->pdev->dev, "Check fd spec failed\n"); | |
4551 | return ret; | |
4552 | } | |
4553 | ||
4554 | if (fs->ring_cookie == RX_CLS_FLOW_DISC) { | |
4555 | action = HCLGE_FD_ACTION_DROP_PACKET; | |
4556 | } else { | |
4557 | u32 ring = ethtool_get_flow_spec_ring(fs->ring_cookie); | |
4558 | u8 vf = ethtool_get_flow_spec_ring_vf(fs->ring_cookie); | |
4559 | u16 tqps; | |
4560 | ||
4561 | dst_vport_id = vf ? hdev->vport[vf].vport_id : vport->vport_id; | |
4562 | tqps = vf ? hdev->vport[vf].alloc_tqps : vport->alloc_tqps; | |
4563 | ||
4564 | if (ring >= tqps) { | |
4565 | dev_err(&hdev->pdev->dev, | |
4566 | "Error: queue id (%d) > max tqp num (%d)\n", | |
4567 | ring, tqps - 1); | |
4568 | return -EINVAL; | |
4569 | } | |
4570 | ||
4571 | if (vf > hdev->num_req_vfs) { | |
4572 | dev_err(&hdev->pdev->dev, | |
4573 | "Error: vf id (%d) > max vf num (%d)\n", | |
4574 | vf, hdev->num_req_vfs); | |
4575 | return -EINVAL; | |
4576 | } | |
4577 | ||
4578 | action = HCLGE_FD_ACTION_ACCEPT_PACKET; | |
4579 | q_index = ring; | |
4580 | } | |
4581 | ||
4582 | rule = kzalloc(sizeof(*rule), GFP_KERNEL); | |
4583 | if (!rule) | |
4584 | return -ENOMEM; | |
4585 | ||
4586 | ret = hclge_fd_get_tuple(hdev, fs, rule); | |
4587 | if (ret) | |
4588 | goto free_rule; | |
4589 | ||
4590 | rule->flow_type = fs->flow_type; | |
4591 | ||
4592 | rule->location = fs->location; | |
4593 | rule->unused_tuple = unused; | |
4594 | rule->vf_id = dst_vport_id; | |
4595 | rule->queue_id = q_index; | |
4596 | rule->action = action; | |
4597 | ||
4598 | ret = hclge_config_action(hdev, HCLGE_FD_STAGE_1, rule); | |
4599 | if (ret) | |
4600 | goto free_rule; | |
4601 | ||
4602 | ret = hclge_config_key(hdev, HCLGE_FD_STAGE_1, rule); | |
4603 | if (ret) | |
4604 | goto free_rule; | |
4605 | ||
4606 | ret = hclge_fd_update_rule_list(hdev, rule, fs->location, true); | |
4607 | if (ret) | |
4608 | goto free_rule; | |
4609 | ||
4610 | return ret; | |
4611 | ||
4612 | free_rule: | |
4613 | kfree(rule); | |
4614 | return ret; | |
4615 | } | |
4616 | ||
4617 | static int hclge_del_fd_entry(struct hnae3_handle *handle, | |
4618 | struct ethtool_rxnfc *cmd) | |
4619 | { | |
4620 | struct hclge_vport *vport = hclge_get_vport(handle); | |
4621 | struct hclge_dev *hdev = vport->back; | |
4622 | struct ethtool_rx_flow_spec *fs; | |
4623 | int ret; | |
4624 | ||
4625 | if (!hnae3_dev_fd_supported(hdev)) | |
4626 | return -EOPNOTSUPP; | |
4627 | ||
4628 | fs = (struct ethtool_rx_flow_spec *)&cmd->fs; | |
4629 | ||
4630 | if (fs->location >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]) | |
4631 | return -EINVAL; | |
4632 | ||
4633 | if (!hclge_fd_rule_exist(hdev, fs->location)) { | |
4634 | dev_err(&hdev->pdev->dev, | |
4635 | "Delete fail, rule %d is inexistent\n", | |
4636 | fs->location); | |
4637 | return -ENOENT; | |
4638 | } | |
4639 | ||
4640 | ret = hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true, | |
4641 | fs->location, NULL, false); | |
4642 | if (ret) | |
4643 | return ret; | |
4644 | ||
4645 | return hclge_fd_update_rule_list(hdev, NULL, fs->location, | |
4646 | false); | |
4647 | } | |
4648 | ||
7ce98982 JS |
4649 | static void hclge_del_all_fd_entries(struct hnae3_handle *handle, |
4650 | bool clear_list) | |
4651 | { | |
4652 | struct hclge_vport *vport = hclge_get_vport(handle); | |
4653 | struct hclge_dev *hdev = vport->back; | |
4654 | struct hclge_fd_rule *rule; | |
4655 | struct hlist_node *node; | |
4656 | ||
4657 | if (!hnae3_dev_fd_supported(hdev)) | |
4658 | return; | |
4659 | ||
4660 | if (clear_list) { | |
4661 | hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, | |
4662 | rule_node) { | |
4663 | hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true, | |
4664 | rule->location, NULL, false); | |
4665 | hlist_del(&rule->rule_node); | |
4666 | kfree(rule); | |
4667 | hdev->hclge_fd_rule_num--; | |
4668 | } | |
4669 | } else { | |
4670 | hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, | |
4671 | rule_node) | |
4672 | hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true, | |
4673 | rule->location, NULL, false); | |
4674 | } | |
4675 | } | |
4676 | ||
4677 | static int hclge_restore_fd_entries(struct hnae3_handle *handle) | |
4678 | { | |
4679 | struct hclge_vport *vport = hclge_get_vport(handle); | |
4680 | struct hclge_dev *hdev = vport->back; | |
4681 | struct hclge_fd_rule *rule; | |
4682 | struct hlist_node *node; | |
4683 | int ret; | |
4684 | ||
1afdb53a HT |
4685 | /* Return ok here, because reset error handling will check this |
4686 | * return value. If error is returned here, the reset process will | |
4687 | * fail. | |
4688 | */ | |
7ce98982 | 4689 | if (!hnae3_dev_fd_supported(hdev)) |
1afdb53a | 4690 | return 0; |
7ce98982 JS |
4691 | |
4692 | hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) { | |
4693 | ret = hclge_config_action(hdev, HCLGE_FD_STAGE_1, rule); | |
4694 | if (!ret) | |
4695 | ret = hclge_config_key(hdev, HCLGE_FD_STAGE_1, rule); | |
4696 | ||
4697 | if (ret) { | |
4698 | dev_warn(&hdev->pdev->dev, | |
4699 | "Restore rule %d failed, remove it\n", | |
4700 | rule->location); | |
4701 | hlist_del(&rule->rule_node); | |
4702 | kfree(rule); | |
4703 | hdev->hclge_fd_rule_num--; | |
4704 | } | |
4705 | } | |
4706 | return 0; | |
4707 | } | |
4708 | ||
295043a7 JS |
4709 | static int hclge_get_fd_rule_cnt(struct hnae3_handle *handle, |
4710 | struct ethtool_rxnfc *cmd) | |
4711 | { | |
4712 | struct hclge_vport *vport = hclge_get_vport(handle); | |
4713 | struct hclge_dev *hdev = vport->back; | |
4714 | ||
4715 | if (!hnae3_dev_fd_supported(hdev)) | |
4716 | return -EOPNOTSUPP; | |
4717 | ||
4718 | cmd->rule_cnt = hdev->hclge_fd_rule_num; | |
4719 | cmd->data = hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]; | |
4720 | ||
4721 | return 0; | |
4722 | } | |
4723 | ||
4724 | static int hclge_get_fd_rule_info(struct hnae3_handle *handle, | |
4725 | struct ethtool_rxnfc *cmd) | |
4726 | { | |
4727 | struct hclge_vport *vport = hclge_get_vport(handle); | |
4728 | struct hclge_fd_rule *rule = NULL; | |
4729 | struct hclge_dev *hdev = vport->back; | |
4730 | struct ethtool_rx_flow_spec *fs; | |
4731 | struct hlist_node *node2; | |
4732 | ||
4733 | if (!hnae3_dev_fd_supported(hdev)) | |
4734 | return -EOPNOTSUPP; | |
4735 | ||
4736 | fs = (struct ethtool_rx_flow_spec *)&cmd->fs; | |
4737 | ||
4738 | hlist_for_each_entry_safe(rule, node2, &hdev->fd_rule_list, rule_node) { | |
4739 | if (rule->location >= fs->location) | |
4740 | break; | |
4741 | } | |
4742 | ||
4743 | if (!rule || fs->location != rule->location) | |
4744 | return -ENOENT; | |
4745 | ||
4746 | fs->flow_type = rule->flow_type; | |
4747 | switch (fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) { | |
4748 | case SCTP_V4_FLOW: | |
4749 | case TCP_V4_FLOW: | |
4750 | case UDP_V4_FLOW: | |
4751 | fs->h_u.tcp_ip4_spec.ip4src = | |
4752 | cpu_to_be32(rule->tuples.src_ip[3]); | |
4753 | fs->m_u.tcp_ip4_spec.ip4src = | |
4754 | rule->unused_tuple & BIT(INNER_SRC_IP) ? | |
4755 | 0 : cpu_to_be32(rule->tuples_mask.src_ip[3]); | |
4756 | ||
4757 | fs->h_u.tcp_ip4_spec.ip4dst = | |
4758 | cpu_to_be32(rule->tuples.dst_ip[3]); | |
4759 | fs->m_u.tcp_ip4_spec.ip4dst = | |
4760 | rule->unused_tuple & BIT(INNER_DST_IP) ? | |
4761 | 0 : cpu_to_be32(rule->tuples_mask.dst_ip[3]); | |
4762 | ||
4763 | fs->h_u.tcp_ip4_spec.psrc = cpu_to_be16(rule->tuples.src_port); | |
4764 | fs->m_u.tcp_ip4_spec.psrc = | |
4765 | rule->unused_tuple & BIT(INNER_SRC_PORT) ? | |
4766 | 0 : cpu_to_be16(rule->tuples_mask.src_port); | |
4767 | ||
4768 | fs->h_u.tcp_ip4_spec.pdst = cpu_to_be16(rule->tuples.dst_port); | |
4769 | fs->m_u.tcp_ip4_spec.pdst = | |
4770 | rule->unused_tuple & BIT(INNER_DST_PORT) ? | |
4771 | 0 : cpu_to_be16(rule->tuples_mask.dst_port); | |
4772 | ||
4773 | fs->h_u.tcp_ip4_spec.tos = rule->tuples.ip_tos; | |
4774 | fs->m_u.tcp_ip4_spec.tos = | |
4775 | rule->unused_tuple & BIT(INNER_IP_TOS) ? | |
4776 | 0 : rule->tuples_mask.ip_tos; | |
4777 | ||
4778 | break; | |
4779 | case IP_USER_FLOW: | |
4780 | fs->h_u.usr_ip4_spec.ip4src = | |
4781 | cpu_to_be32(rule->tuples.src_ip[3]); | |
4782 | fs->m_u.tcp_ip4_spec.ip4src = | |
4783 | rule->unused_tuple & BIT(INNER_SRC_IP) ? | |
4784 | 0 : cpu_to_be32(rule->tuples_mask.src_ip[3]); | |
4785 | ||
4786 | fs->h_u.usr_ip4_spec.ip4dst = | |
4787 | cpu_to_be32(rule->tuples.dst_ip[3]); | |
4788 | fs->m_u.usr_ip4_spec.ip4dst = | |
4789 | rule->unused_tuple & BIT(INNER_DST_IP) ? | |
4790 | 0 : cpu_to_be32(rule->tuples_mask.dst_ip[3]); | |
4791 | ||
4792 | fs->h_u.usr_ip4_spec.tos = rule->tuples.ip_tos; | |
4793 | fs->m_u.usr_ip4_spec.tos = | |
4794 | rule->unused_tuple & BIT(INNER_IP_TOS) ? | |
4795 | 0 : rule->tuples_mask.ip_tos; | |
4796 | ||
4797 | fs->h_u.usr_ip4_spec.proto = rule->tuples.ip_proto; | |
4798 | fs->m_u.usr_ip4_spec.proto = | |
4799 | rule->unused_tuple & BIT(INNER_IP_PROTO) ? | |
4800 | 0 : rule->tuples_mask.ip_proto; | |
4801 | ||
4802 | fs->h_u.usr_ip4_spec.ip_ver = ETH_RX_NFC_IP4; | |
4803 | ||
4804 | break; | |
4805 | case SCTP_V6_FLOW: | |
4806 | case TCP_V6_FLOW: | |
4807 | case UDP_V6_FLOW: | |
4808 | cpu_to_be32_array(fs->h_u.tcp_ip6_spec.ip6src, | |
4809 | rule->tuples.src_ip, 4); | |
4810 | if (rule->unused_tuple & BIT(INNER_SRC_IP)) | |
4811 | memset(fs->m_u.tcp_ip6_spec.ip6src, 0, sizeof(int) * 4); | |
4812 | else | |
4813 | cpu_to_be32_array(fs->m_u.tcp_ip6_spec.ip6src, | |
4814 | rule->tuples_mask.src_ip, 4); | |
4815 | ||
4816 | cpu_to_be32_array(fs->h_u.tcp_ip6_spec.ip6dst, | |
4817 | rule->tuples.dst_ip, 4); | |
4818 | if (rule->unused_tuple & BIT(INNER_DST_IP)) | |
4819 | memset(fs->m_u.tcp_ip6_spec.ip6dst, 0, sizeof(int) * 4); | |
4820 | else | |
4821 | cpu_to_be32_array(fs->m_u.tcp_ip6_spec.ip6dst, | |
4822 | rule->tuples_mask.dst_ip, 4); | |
4823 | ||
4824 | fs->h_u.tcp_ip6_spec.psrc = cpu_to_be16(rule->tuples.src_port); | |
4825 | fs->m_u.tcp_ip6_spec.psrc = | |
4826 | rule->unused_tuple & BIT(INNER_SRC_PORT) ? | |
4827 | 0 : cpu_to_be16(rule->tuples_mask.src_port); | |
4828 | ||
4829 | fs->h_u.tcp_ip6_spec.pdst = cpu_to_be16(rule->tuples.dst_port); | |
4830 | fs->m_u.tcp_ip6_spec.pdst = | |
4831 | rule->unused_tuple & BIT(INNER_DST_PORT) ? | |
4832 | 0 : cpu_to_be16(rule->tuples_mask.dst_port); | |
4833 | ||
4834 | break; | |
4835 | case IPV6_USER_FLOW: | |
4836 | cpu_to_be32_array(fs->h_u.usr_ip6_spec.ip6src, | |
4837 | rule->tuples.src_ip, 4); | |
4838 | if (rule->unused_tuple & BIT(INNER_SRC_IP)) | |
4839 | memset(fs->m_u.usr_ip6_spec.ip6src, 0, sizeof(int) * 4); | |
4840 | else | |
4841 | cpu_to_be32_array(fs->m_u.usr_ip6_spec.ip6src, | |
4842 | rule->tuples_mask.src_ip, 4); | |
4843 | ||
4844 | cpu_to_be32_array(fs->h_u.usr_ip6_spec.ip6dst, | |
4845 | rule->tuples.dst_ip, 4); | |
4846 | if (rule->unused_tuple & BIT(INNER_DST_IP)) | |
4847 | memset(fs->m_u.usr_ip6_spec.ip6dst, 0, sizeof(int) * 4); | |
4848 | else | |
4849 | cpu_to_be32_array(fs->m_u.usr_ip6_spec.ip6dst, | |
4850 | rule->tuples_mask.dst_ip, 4); | |
4851 | ||
4852 | fs->h_u.usr_ip6_spec.l4_proto = rule->tuples.ip_proto; | |
4853 | fs->m_u.usr_ip6_spec.l4_proto = | |
4854 | rule->unused_tuple & BIT(INNER_IP_PROTO) ? | |
4855 | 0 : rule->tuples_mask.ip_proto; | |
4856 | ||
4857 | break; | |
4858 | case ETHER_FLOW: | |
4859 | ether_addr_copy(fs->h_u.ether_spec.h_source, | |
4860 | rule->tuples.src_mac); | |
4861 | if (rule->unused_tuple & BIT(INNER_SRC_MAC)) | |
4862 | eth_zero_addr(fs->m_u.ether_spec.h_source); | |
4863 | else | |
4864 | ether_addr_copy(fs->m_u.ether_spec.h_source, | |
4865 | rule->tuples_mask.src_mac); | |
4866 | ||
4867 | ether_addr_copy(fs->h_u.ether_spec.h_dest, | |
4868 | rule->tuples.dst_mac); | |
4869 | if (rule->unused_tuple & BIT(INNER_DST_MAC)) | |
4870 | eth_zero_addr(fs->m_u.ether_spec.h_dest); | |
4871 | else | |
4872 | ether_addr_copy(fs->m_u.ether_spec.h_dest, | |
4873 | rule->tuples_mask.dst_mac); | |
4874 | ||
4875 | fs->h_u.ether_spec.h_proto = | |
4876 | cpu_to_be16(rule->tuples.ether_proto); | |
4877 | fs->m_u.ether_spec.h_proto = | |
4878 | rule->unused_tuple & BIT(INNER_ETH_TYPE) ? | |
4879 | 0 : cpu_to_be16(rule->tuples_mask.ether_proto); | |
4880 | ||
4881 | break; | |
4882 | default: | |
4883 | return -EOPNOTSUPP; | |
4884 | } | |
4885 | ||
4886 | if (fs->flow_type & FLOW_EXT) { | |
4887 | fs->h_ext.vlan_tci = cpu_to_be16(rule->tuples.vlan_tag1); | |
4888 | fs->m_ext.vlan_tci = | |
4889 | rule->unused_tuple & BIT(INNER_VLAN_TAG_FST) ? | |
4890 | cpu_to_be16(VLAN_VID_MASK) : | |
4891 | cpu_to_be16(rule->tuples_mask.vlan_tag1); | |
4892 | } | |
4893 | ||
4894 | if (fs->flow_type & FLOW_MAC_EXT) { | |
4895 | ether_addr_copy(fs->h_ext.h_dest, rule->tuples.dst_mac); | |
4896 | if (rule->unused_tuple & BIT(INNER_DST_MAC)) | |
4897 | eth_zero_addr(fs->m_u.ether_spec.h_dest); | |
4898 | else | |
4899 | ether_addr_copy(fs->m_u.ether_spec.h_dest, | |
4900 | rule->tuples_mask.dst_mac); | |
4901 | } | |
4902 | ||
4903 | if (rule->action == HCLGE_FD_ACTION_DROP_PACKET) { | |
4904 | fs->ring_cookie = RX_CLS_FLOW_DISC; | |
4905 | } else { | |
4906 | u64 vf_id; | |
4907 | ||
4908 | fs->ring_cookie = rule->queue_id; | |
4909 | vf_id = rule->vf_id; | |
4910 | vf_id <<= ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF; | |
4911 | fs->ring_cookie |= vf_id; | |
4912 | } | |
4913 | ||
4914 | return 0; | |
4915 | } | |
4916 | ||
4917 | static int hclge_get_all_rules(struct hnae3_handle *handle, | |
4918 | struct ethtool_rxnfc *cmd, u32 *rule_locs) | |
4919 | { | |
4920 | struct hclge_vport *vport = hclge_get_vport(handle); | |
4921 | struct hclge_dev *hdev = vport->back; | |
4922 | struct hclge_fd_rule *rule; | |
4923 | struct hlist_node *node2; | |
4924 | int cnt = 0; | |
4925 | ||
4926 | if (!hnae3_dev_fd_supported(hdev)) | |
4927 | return -EOPNOTSUPP; | |
4928 | ||
4929 | cmd->data = hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]; | |
4930 | ||
4931 | hlist_for_each_entry_safe(rule, node2, | |
4932 | &hdev->fd_rule_list, rule_node) { | |
4933 | if (cnt == cmd->rule_cnt) | |
4934 | return -EMSGSIZE; | |
4935 | ||
4936 | rule_locs[cnt] = rule->location; | |
4937 | cnt++; | |
4938 | } | |
4939 | ||
4940 | cmd->rule_cnt = cnt; | |
4941 | ||
4942 | return 0; | |
4943 | } | |
4944 | ||
225c02eb HT |
4945 | static bool hclge_get_hw_reset_stat(struct hnae3_handle *handle) |
4946 | { | |
4947 | struct hclge_vport *vport = hclge_get_vport(handle); | |
4948 | struct hclge_dev *hdev = vport->back; | |
4949 | ||
4950 | return hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG) || | |
4951 | hclge_read_dev(&hdev->hw, HCLGE_FUN_RST_ING); | |
4952 | } | |
4953 | ||
4954 | static bool hclge_ae_dev_resetting(struct hnae3_handle *handle) | |
4955 | { | |
4956 | struct hclge_vport *vport = hclge_get_vport(handle); | |
4957 | struct hclge_dev *hdev = vport->back; | |
4958 | ||
4959 | return test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state); | |
4960 | } | |
4961 | ||
4962 | static unsigned long hclge_ae_dev_reset_cnt(struct hnae3_handle *handle) | |
4963 | { | |
4964 | struct hclge_vport *vport = hclge_get_vport(handle); | |
4965 | struct hclge_dev *hdev = vport->back; | |
4966 | ||
4967 | return hdev->reset_count; | |
4968 | } | |
4969 | ||
d1f04a80 JS |
4970 | static void hclge_enable_fd(struct hnae3_handle *handle, bool enable) |
4971 | { | |
4972 | struct hclge_vport *vport = hclge_get_vport(handle); | |
4973 | struct hclge_dev *hdev = vport->back; | |
4974 | ||
4975 | hdev->fd_cfg.fd_en = enable; | |
4976 | if (!enable) | |
4977 | hclge_del_all_fd_entries(handle, false); | |
4978 | else | |
4979 | hclge_restore_fd_entries(handle); | |
4980 | } | |
4981 | ||
46a3df9f S |
4982 | static void hclge_cfg_mac_mode(struct hclge_dev *hdev, bool enable) |
4983 | { | |
4984 | struct hclge_desc desc; | |
d44f9b63 YL |
4985 | struct hclge_config_mac_mode_cmd *req = |
4986 | (struct hclge_config_mac_mode_cmd *)desc.data; | |
a90bb9a5 | 4987 | u32 loop_en = 0; |
46a3df9f S |
4988 | int ret; |
4989 | ||
4990 | hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, false); | |
ccc23ef3 PL |
4991 | hnae3_set_bit(loop_en, HCLGE_MAC_TX_EN_B, enable); |
4992 | hnae3_set_bit(loop_en, HCLGE_MAC_RX_EN_B, enable); | |
4993 | hnae3_set_bit(loop_en, HCLGE_MAC_PAD_TX_B, enable); | |
4994 | hnae3_set_bit(loop_en, HCLGE_MAC_PAD_RX_B, enable); | |
4995 | hnae3_set_bit(loop_en, HCLGE_MAC_1588_TX_B, 0); | |
4996 | hnae3_set_bit(loop_en, HCLGE_MAC_1588_RX_B, 0); | |
4997 | hnae3_set_bit(loop_en, HCLGE_MAC_APP_LP_B, 0); | |
4998 | hnae3_set_bit(loop_en, HCLGE_MAC_LINE_LP_B, 0); | |
4999 | hnae3_set_bit(loop_en, HCLGE_MAC_FCS_TX_B, enable); | |
5000 | hnae3_set_bit(loop_en, HCLGE_MAC_RX_FCS_B, enable); | |
5001 | hnae3_set_bit(loop_en, HCLGE_MAC_RX_FCS_STRIP_B, enable); | |
5002 | hnae3_set_bit(loop_en, HCLGE_MAC_TX_OVERSIZE_TRUNCATE_B, enable); | |
5003 | hnae3_set_bit(loop_en, HCLGE_MAC_RX_OVERSIZE_TRUNCATE_B, enable); | |
5004 | hnae3_set_bit(loop_en, HCLGE_MAC_TX_UNDER_MIN_ERR_B, enable); | |
a90bb9a5 | 5005 | req->txrx_pad_fcs_loop_en = cpu_to_le32(loop_en); |
46a3df9f S |
5006 | |
5007 | ret = hclge_cmd_send(&hdev->hw, &desc, 1); | |
5008 | if (ret) | |
5009 | dev_err(&hdev->pdev->dev, | |
5010 | "mac enable fail, ret =%d.\n", ret); | |
5011 | } | |
5012 | ||
67b8c316 | 5013 | static int hclge_set_app_loopback(struct hclge_dev *hdev, bool en) |
c39c4d98 | 5014 | { |
c39c4d98 | 5015 | struct hclge_config_mac_mode_cmd *req; |
c39c4d98 YL |
5016 | struct hclge_desc desc; |
5017 | u32 loop_en; | |
5018 | int ret; | |
5019 | ||
e67d9ce9 YL |
5020 | req = (struct hclge_config_mac_mode_cmd *)&desc.data[0]; |
5021 | /* 1 Read out the MAC mode config at first */ | |
5022 | hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, true); | |
5023 | ret = hclge_cmd_send(&hdev->hw, &desc, 1); | |
5024 | if (ret) { | |
5025 | dev_err(&hdev->pdev->dev, | |
5026 | "mac loopback get fail, ret =%d.\n", ret); | |
5027 | return ret; | |
5028 | } | |
c39c4d98 | 5029 | |
e67d9ce9 YL |
5030 | /* 2 Then setup the loopback flag */ |
5031 | loop_en = le32_to_cpu(req->txrx_pad_fcs_loop_en); | |
ccc23ef3 | 5032 | hnae3_set_bit(loop_en, HCLGE_MAC_APP_LP_B, en ? 1 : 0); |
3ebc5e0b YL |
5033 | hnae3_set_bit(loop_en, HCLGE_MAC_TX_EN_B, en ? 1 : 0); |
5034 | hnae3_set_bit(loop_en, HCLGE_MAC_RX_EN_B, en ? 1 : 0); | |
e67d9ce9 YL |
5035 | |
5036 | req->txrx_pad_fcs_loop_en = cpu_to_le32(loop_en); | |
c39c4d98 | 5037 | |
e67d9ce9 YL |
5038 | /* 3 Config mac work mode with loopback flag |
5039 | * and its original configure parameters | |
5040 | */ | |
5041 | hclge_cmd_reuse_desc(&desc, false); | |
5042 | ret = hclge_cmd_send(&hdev->hw, &desc, 1); | |
5043 | if (ret) | |
5044 | dev_err(&hdev->pdev->dev, | |
5045 | "mac loopback set fail, ret =%d.\n", ret); | |
5046 | return ret; | |
5047 | } | |
c39c4d98 | 5048 | |
86957272 FL |
5049 | static int hclge_set_serdes_loopback(struct hclge_dev *hdev, bool en, |
5050 | enum hnae3_loop loop_mode) | |
e006bb00 PL |
5051 | { |
5052 | #define HCLGE_SERDES_RETRY_MS 10 | |
5053 | #define HCLGE_SERDES_RETRY_NUM 100 | |
5054 | struct hclge_serdes_lb_cmd *req; | |
5055 | struct hclge_desc desc; | |
5056 | int ret, i = 0; | |
86957272 | 5057 | u8 loop_mode_b; |
e006bb00 | 5058 | |
855f03fb | 5059 | req = (struct hclge_serdes_lb_cmd *)desc.data; |
e006bb00 PL |
5060 | hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SERDES_LOOPBACK, false); |
5061 | ||
86957272 FL |
5062 | switch (loop_mode) { |
5063 | case HNAE3_LOOP_SERIAL_SERDES: | |
5064 | loop_mode_b = HCLGE_CMD_SERDES_SERIAL_INNER_LOOP_B; | |
5065 | break; | |
5066 | case HNAE3_LOOP_PARALLEL_SERDES: | |
5067 | loop_mode_b = HCLGE_CMD_SERDES_PARALLEL_INNER_LOOP_B; | |
5068 | break; | |
5069 | default: | |
5070 | dev_err(&hdev->pdev->dev, | |
5071 | "unsupported serdes loopback mode %d\n", loop_mode); | |
5072 | return -ENOTSUPP; | |
5073 | } | |
5074 | ||
e006bb00 | 5075 | if (en) { |
86957272 FL |
5076 | req->enable = loop_mode_b; |
5077 | req->mask = loop_mode_b; | |
e006bb00 | 5078 | } else { |
86957272 | 5079 | req->mask = loop_mode_b; |
e006bb00 PL |
5080 | } |
5081 | ||
5082 | ret = hclge_cmd_send(&hdev->hw, &desc, 1); | |
5083 | if (ret) { | |
5084 | dev_err(&hdev->pdev->dev, | |
5085 | "serdes loopback set fail, ret = %d\n", ret); | |
5086 | return ret; | |
5087 | } | |
5088 | ||
5089 | do { | |
5090 | msleep(HCLGE_SERDES_RETRY_MS); | |
5091 | hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SERDES_LOOPBACK, | |
5092 | true); | |
5093 | ret = hclge_cmd_send(&hdev->hw, &desc, 1); | |
5094 | if (ret) { | |
5095 | dev_err(&hdev->pdev->dev, | |
5096 | "serdes loopback get, ret = %d\n", ret); | |
5097 | return ret; | |
5098 | } | |
5099 | } while (++i < HCLGE_SERDES_RETRY_NUM && | |
5100 | !(req->result & HCLGE_CMD_SERDES_DONE_B)); | |
5101 | ||
5102 | if (!(req->result & HCLGE_CMD_SERDES_DONE_B)) { | |
5103 | dev_err(&hdev->pdev->dev, "serdes loopback set timeout\n"); | |
5104 | return -EBUSY; | |
5105 | } else if (!(req->result & HCLGE_CMD_SERDES_SUCCESS_B)) { | |
5106 | dev_err(&hdev->pdev->dev, "serdes loopback set failed in fw\n"); | |
5107 | return -EIO; | |
5108 | } | |
5109 | ||
3ebc5e0b | 5110 | hclge_cfg_mac_mode(hdev, en); |
e006bb00 PL |
5111 | return 0; |
5112 | } | |
5113 | ||
3ebc5e0b YL |
5114 | static int hclge_tqp_enable(struct hclge_dev *hdev, int tqp_id, |
5115 | int stream_id, bool enable) | |
5116 | { | |
5117 | struct hclge_desc desc; | |
5118 | struct hclge_cfg_com_tqp_queue_cmd *req = | |
5119 | (struct hclge_cfg_com_tqp_queue_cmd *)desc.data; | |
5120 | int ret; | |
5121 | ||
5122 | hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_COM_TQP_QUEUE, false); | |
5123 | req->tqp_id = cpu_to_le16(tqp_id & HCLGE_RING_ID_MASK); | |
5124 | req->stream_id = cpu_to_le16(stream_id); | |
5125 | req->enable |= enable << HCLGE_TQP_ENABLE_B; | |
5126 | ||
5127 | ret = hclge_cmd_send(&hdev->hw, &desc, 1); | |
5128 | if (ret) | |
5129 | dev_err(&hdev->pdev->dev, | |
5130 | "Tqp enable fail, status =%d.\n", ret); | |
5131 | return ret; | |
5132 | } | |
5133 | ||
e67d9ce9 YL |
5134 | static int hclge_set_loopback(struct hnae3_handle *handle, |
5135 | enum hnae3_loop loop_mode, bool en) | |
5136 | { | |
5137 | struct hclge_vport *vport = hclge_get_vport(handle); | |
5138 | struct hclge_dev *hdev = vport->back; | |
3ebc5e0b | 5139 | int i, ret; |
e67d9ce9 YL |
5140 | |
5141 | switch (loop_mode) { | |
67b8c316 FL |
5142 | case HNAE3_LOOP_APP: |
5143 | ret = hclge_set_app_loopback(hdev, en); | |
c39c4d98 | 5144 | break; |
86957272 FL |
5145 | case HNAE3_LOOP_SERIAL_SERDES: |
5146 | case HNAE3_LOOP_PARALLEL_SERDES: | |
5147 | ret = hclge_set_serdes_loopback(hdev, en, loop_mode); | |
e006bb00 | 5148 | break; |
c39c4d98 YL |
5149 | default: |
5150 | ret = -ENOTSUPP; | |
5151 | dev_err(&hdev->pdev->dev, | |
5152 | "loop_mode %d is not supported\n", loop_mode); | |
5153 | break; | |
5154 | } | |
5155 | ||
3ebc5e0b YL |
5156 | for (i = 0; i < vport->alloc_tqps; i++) { |
5157 | ret = hclge_tqp_enable(hdev, i, 0, en); | |
5158 | if (ret) | |
5159 | return ret; | |
5160 | } | |
46a3df9f | 5161 | |
3ebc5e0b | 5162 | return 0; |
46a3df9f S |
5163 | } |
5164 | ||
5165 | static void hclge_reset_tqp_stats(struct hnae3_handle *handle) | |
5166 | { | |
5167 | struct hclge_vport *vport = hclge_get_vport(handle); | |
5168 | struct hnae3_queue *queue; | |
5169 | struct hclge_tqp *tqp; | |
5170 | int i; | |
5171 | ||
5172 | for (i = 0; i < vport->alloc_tqps; i++) { | |
5173 | queue = handle->kinfo.tqp[i]; | |
5174 | tqp = container_of(queue, struct hclge_tqp, q); | |
5175 | memset(&tqp->tqp_stats, 0, sizeof(tqp->tqp_stats)); | |
5176 | } | |
5177 | } | |
5178 | ||
5179 | static int hclge_ae_start(struct hnae3_handle *handle) | |
5180 | { | |
5181 | struct hclge_vport *vport = hclge_get_vport(handle); | |
5182 | struct hclge_dev *hdev = vport->back; | |
46a3df9f | 5183 | |
46a3df9f S |
5184 | /* mac enable */ |
5185 | hclge_cfg_mac_mode(hdev, true); | |
5186 | clear_bit(HCLGE_STATE_DOWN, &hdev->state); | |
d039ef68 | 5187 | mod_timer(&hdev->service_timer, jiffies + HZ); |
3ae84019 | 5188 | hdev->hw.mac.link = 0; |
46a3df9f | 5189 | |
f9637cc2 PL |
5190 | /* reset tqp stats */ |
5191 | hclge_reset_tqp_stats(handle); | |
5192 | ||
dda6b7d5 | 5193 | hclge_mac_start_phy(hdev); |
46a3df9f | 5194 | |
46a3df9f S |
5195 | return 0; |
5196 | } | |
5197 | ||
5198 | static void hclge_ae_stop(struct hnae3_handle *handle) | |
5199 | { | |
5200 | struct hclge_vport *vport = hclge_get_vport(handle); | |
5201 | struct hclge_dev *hdev = vport->back; | |
46a3df9f | 5202 | |
4ee3e5a8 FL |
5203 | set_bit(HCLGE_STATE_DOWN, &hdev->state); |
5204 | ||
f9637cc2 PL |
5205 | del_timer_sync(&hdev->service_timer); |
5206 | cancel_work_sync(&hdev->service_task); | |
42b11ab7 | 5207 | clear_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state); |
f9637cc2 | 5208 | |
48ac80db HT |
5209 | /* If it is not PF reset, the firmware will disable the MAC, |
5210 | * so it only need to stop phy here. | |
5211 | */ | |
5212 | if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) && | |
5213 | hdev->reset_type != HNAE3_FUNC_RESET) { | |
4486f5c9 | 5214 | hclge_mac_stop_phy(hdev); |
f9637cc2 | 5215 | return; |
4486f5c9 | 5216 | } |
f9637cc2 | 5217 | |
46a3df9f S |
5218 | /* Mac disable */ |
5219 | hclge_cfg_mac_mode(hdev, false); | |
5220 | ||
5221 | hclge_mac_stop_phy(hdev); | |
5222 | ||
5223 | /* reset tqp stats */ | |
5224 | hclge_reset_tqp_stats(handle); | |
b91fb71c FL |
5225 | del_timer_sync(&hdev->service_timer); |
5226 | cancel_work_sync(&hdev->service_task); | |
5227 | hclge_update_link_status(hdev); | |
46a3df9f S |
5228 | } |
5229 | ||
337460de YL |
5230 | int hclge_vport_start(struct hclge_vport *vport) |
5231 | { | |
5232 | set_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state); | |
5233 | vport->last_active_jiffies = jiffies; | |
5234 | return 0; | |
5235 | } | |
5236 | ||
5237 | void hclge_vport_stop(struct hclge_vport *vport) | |
5238 | { | |
5239 | clear_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state); | |
5240 | } | |
5241 | ||
5242 | static int hclge_client_start(struct hnae3_handle *handle) | |
5243 | { | |
5244 | struct hclge_vport *vport = hclge_get_vport(handle); | |
5245 | ||
5246 | return hclge_vport_start(vport); | |
5247 | } | |
5248 | ||
5249 | static void hclge_client_stop(struct hnae3_handle *handle) | |
5250 | { | |
5251 | struct hclge_vport *vport = hclge_get_vport(handle); | |
5252 | ||
5253 | hclge_vport_stop(vport); | |
5254 | } | |
5255 | ||
46a3df9f S |
5256 | static int hclge_get_mac_vlan_cmd_status(struct hclge_vport *vport, |
5257 | u16 cmdq_resp, u8 resp_code, | |
5258 | enum hclge_mac_vlan_tbl_opcode op) | |
5259 | { | |
5260 | struct hclge_dev *hdev = vport->back; | |
5261 | int return_status = -EIO; | |
5262 | ||
5263 | if (cmdq_resp) { | |
5264 | dev_err(&hdev->pdev->dev, | |
5265 | "cmdq execute failed for get_mac_vlan_cmd_status,status=%d.\n", | |
5266 | cmdq_resp); | |
5267 | return -EIO; | |
5268 | } | |
5269 | ||
5270 | if (op == HCLGE_MAC_VLAN_ADD) { | |
5271 | if ((!resp_code) || (resp_code == 1)) { | |
5272 | return_status = 0; | |
5273 | } else if (resp_code == 2) { | |
2f894c5b | 5274 | return_status = -ENOSPC; |
46a3df9f S |
5275 | dev_err(&hdev->pdev->dev, |
5276 | "add mac addr failed for uc_overflow.\n"); | |
5277 | } else if (resp_code == 3) { | |
2f894c5b | 5278 | return_status = -ENOSPC; |
46a3df9f S |
5279 | dev_err(&hdev->pdev->dev, |
5280 | "add mac addr failed for mc_overflow.\n"); | |
5281 | } else { | |
5282 | dev_err(&hdev->pdev->dev, | |
5283 | "add mac addr failed for undefined, code=%d.\n", | |
5284 | resp_code); | |
5285 | } | |
5286 | } else if (op == HCLGE_MAC_VLAN_REMOVE) { | |
5287 | if (!resp_code) { | |
5288 | return_status = 0; | |
5289 | } else if (resp_code == 1) { | |
2f894c5b | 5290 | return_status = -ENOENT; |
46a3df9f S |
5291 | dev_dbg(&hdev->pdev->dev, |
5292 | "remove mac addr failed for miss.\n"); | |
5293 | } else { | |
5294 | dev_err(&hdev->pdev->dev, | |
5295 | "remove mac addr failed for undefined, code=%d.\n", | |
5296 | resp_code); | |
5297 | } | |
5298 | } else if (op == HCLGE_MAC_VLAN_LKUP) { | |
5299 | if (!resp_code) { | |
5300 | return_status = 0; | |
5301 | } else if (resp_code == 1) { | |
2f894c5b | 5302 | return_status = -ENOENT; |
46a3df9f S |
5303 | dev_dbg(&hdev->pdev->dev, |
5304 | "lookup mac addr failed for miss.\n"); | |
5305 | } else { | |
5306 | dev_err(&hdev->pdev->dev, | |
5307 | "lookup mac addr failed for undefined, code=%d.\n", | |
5308 | resp_code); | |
5309 | } | |
5310 | } else { | |
2f894c5b | 5311 | return_status = -EINVAL; |
46a3df9f S |
5312 | dev_err(&hdev->pdev->dev, |
5313 | "unknown opcode for get_mac_vlan_cmd_status,opcode=%d.\n", | |
5314 | op); | |
5315 | } | |
5316 | ||
5317 | return return_status; | |
5318 | } | |
5319 | ||
5320 | static int hclge_update_desc_vfid(struct hclge_desc *desc, int vfid, bool clr) | |
5321 | { | |
5322 | int word_num; | |
5323 | int bit_num; | |
5324 | ||
5325 | if (vfid > 255 || vfid < 0) | |
5326 | return -EIO; | |
5327 | ||
5328 | if (vfid >= 0 && vfid <= 191) { | |
5329 | word_num = vfid / 32; | |
5330 | bit_num = vfid % 32; | |
5331 | if (clr) | |
a90bb9a5 | 5332 | desc[1].data[word_num] &= cpu_to_le32(~(1 << bit_num)); |
46a3df9f | 5333 | else |
a90bb9a5 | 5334 | desc[1].data[word_num] |= cpu_to_le32(1 << bit_num); |
46a3df9f S |
5335 | } else { |
5336 | word_num = (vfid - 192) / 32; | |
5337 | bit_num = vfid % 32; | |
5338 | if (clr) | |
a90bb9a5 | 5339 | desc[2].data[word_num] &= cpu_to_le32(~(1 << bit_num)); |
46a3df9f | 5340 | else |
a90bb9a5 | 5341 | desc[2].data[word_num] |= cpu_to_le32(1 << bit_num); |
46a3df9f S |
5342 | } |
5343 | ||
5344 | return 0; | |
5345 | } | |
5346 | ||
5347 | static bool hclge_is_all_function_id_zero(struct hclge_desc *desc) | |
5348 | { | |
5349 | #define HCLGE_DESC_NUMBER 3 | |
5350 | #define HCLGE_FUNC_NUMBER_PER_DESC 6 | |
5351 | int i, j; | |
5352 | ||
5353 | for (i = 0; i < HCLGE_DESC_NUMBER; i++) | |
5354 | for (j = 0; j < HCLGE_FUNC_NUMBER_PER_DESC; j++) | |
5355 | if (desc[i].data[j]) | |
5356 | return false; | |
5357 | ||
5358 | return true; | |
5359 | } | |
5360 | ||
d44f9b63 | 5361 | static void hclge_prepare_mac_addr(struct hclge_mac_vlan_tbl_entry_cmd *new_req, |
46a3df9f S |
5362 | const u8 *addr) |
5363 | { | |
5364 | const unsigned char *mac_addr = addr; | |
5365 | u32 high_val = mac_addr[2] << 16 | (mac_addr[3] << 24) | | |
5366 | (mac_addr[0]) | (mac_addr[1] << 8); | |
5367 | u32 low_val = mac_addr[4] | (mac_addr[5] << 8); | |
5368 | ||
5369 | new_req->mac_addr_hi32 = cpu_to_le32(high_val); | |
5370 | new_req->mac_addr_lo16 = cpu_to_le16(low_val & 0xffff); | |
5371 | } | |
5372 | ||
46a3df9f | 5373 | static int hclge_remove_mac_vlan_tbl(struct hclge_vport *vport, |
d44f9b63 | 5374 | struct hclge_mac_vlan_tbl_entry_cmd *req) |
46a3df9f S |
5375 | { |
5376 | struct hclge_dev *hdev = vport->back; | |
5377 | struct hclge_desc desc; | |
5378 | u8 resp_code; | |
a90bb9a5 | 5379 | u16 retval; |
46a3df9f S |
5380 | int ret; |
5381 | ||
5382 | hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_REMOVE, false); | |
5383 | ||
d44f9b63 | 5384 | memcpy(desc.data, req, sizeof(struct hclge_mac_vlan_tbl_entry_cmd)); |
46a3df9f S |
5385 | |
5386 | ret = hclge_cmd_send(&hdev->hw, &desc, 1); | |
5387 | if (ret) { | |
5388 | dev_err(&hdev->pdev->dev, | |
5389 | "del mac addr failed for cmd_send, ret =%d.\n", | |
5390 | ret); | |
5391 | return ret; | |
5392 | } | |
a90bb9a5 YL |
5393 | resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff; |
5394 | retval = le16_to_cpu(desc.retval); | |
46a3df9f | 5395 | |
a90bb9a5 | 5396 | return hclge_get_mac_vlan_cmd_status(vport, retval, resp_code, |
46a3df9f S |
5397 | HCLGE_MAC_VLAN_REMOVE); |
5398 | } | |
5399 | ||
5400 | static int hclge_lookup_mac_vlan_tbl(struct hclge_vport *vport, | |
d44f9b63 | 5401 | struct hclge_mac_vlan_tbl_entry_cmd *req, |
46a3df9f S |
5402 | struct hclge_desc *desc, |
5403 | bool is_mc) | |
5404 | { | |
5405 | struct hclge_dev *hdev = vport->back; | |
5406 | u8 resp_code; | |
a90bb9a5 | 5407 | u16 retval; |
46a3df9f S |
5408 | int ret; |
5409 | ||
5410 | hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_MAC_VLAN_ADD, true); | |
5411 | if (is_mc) { | |
5412 | desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT); | |
5413 | memcpy(desc[0].data, | |
5414 | req, | |
d44f9b63 | 5415 | sizeof(struct hclge_mac_vlan_tbl_entry_cmd)); |
46a3df9f S |
5416 | hclge_cmd_setup_basic_desc(&desc[1], |
5417 | HCLGE_OPC_MAC_VLAN_ADD, | |
5418 | true); | |
5419 | desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT); | |
5420 | hclge_cmd_setup_basic_desc(&desc[2], | |
5421 | HCLGE_OPC_MAC_VLAN_ADD, | |
5422 | true); | |
5423 | ret = hclge_cmd_send(&hdev->hw, desc, 3); | |
5424 | } else { | |
5425 | memcpy(desc[0].data, | |
5426 | req, | |
d44f9b63 | 5427 | sizeof(struct hclge_mac_vlan_tbl_entry_cmd)); |
46a3df9f S |
5428 | ret = hclge_cmd_send(&hdev->hw, desc, 1); |
5429 | } | |
5430 | if (ret) { | |
5431 | dev_err(&hdev->pdev->dev, | |
5432 | "lookup mac addr failed for cmd_send, ret =%d.\n", | |
5433 | ret); | |
5434 | return ret; | |
5435 | } | |
a90bb9a5 YL |
5436 | resp_code = (le32_to_cpu(desc[0].data[0]) >> 8) & 0xff; |
5437 | retval = le16_to_cpu(desc[0].retval); | |
46a3df9f | 5438 | |
a90bb9a5 | 5439 | return hclge_get_mac_vlan_cmd_status(vport, retval, resp_code, |
46a3df9f S |
5440 | HCLGE_MAC_VLAN_LKUP); |
5441 | } | |
5442 | ||
5443 | static int hclge_add_mac_vlan_tbl(struct hclge_vport *vport, | |
d44f9b63 | 5444 | struct hclge_mac_vlan_tbl_entry_cmd *req, |
46a3df9f S |
5445 | struct hclge_desc *mc_desc) |
5446 | { | |
5447 | struct hclge_dev *hdev = vport->back; | |
5448 | int cfg_status; | |
5449 | u8 resp_code; | |
a90bb9a5 | 5450 | u16 retval; |
46a3df9f S |
5451 | int ret; |
5452 | ||
5453 | if (!mc_desc) { | |
5454 | struct hclge_desc desc; | |
5455 | ||
5456 | hclge_cmd_setup_basic_desc(&desc, | |
5457 | HCLGE_OPC_MAC_VLAN_ADD, | |
5458 | false); | |
d44f9b63 YL |
5459 | memcpy(desc.data, req, |
5460 | sizeof(struct hclge_mac_vlan_tbl_entry_cmd)); | |
46a3df9f | 5461 | ret = hclge_cmd_send(&hdev->hw, &desc, 1); |
a90bb9a5 YL |
5462 | resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff; |
5463 | retval = le16_to_cpu(desc.retval); | |
5464 | ||
5465 | cfg_status = hclge_get_mac_vlan_cmd_status(vport, retval, | |
46a3df9f S |
5466 | resp_code, |
5467 | HCLGE_MAC_VLAN_ADD); | |
5468 | } else { | |
c3b6f755 | 5469 | hclge_cmd_reuse_desc(&mc_desc[0], false); |
46a3df9f | 5470 | mc_desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT); |
c3b6f755 | 5471 | hclge_cmd_reuse_desc(&mc_desc[1], false); |
46a3df9f | 5472 | mc_desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT); |
c3b6f755 | 5473 | hclge_cmd_reuse_desc(&mc_desc[2], false); |
46a3df9f S |
5474 | mc_desc[2].flag &= cpu_to_le16(~HCLGE_CMD_FLAG_NEXT); |
5475 | memcpy(mc_desc[0].data, req, | |
d44f9b63 | 5476 | sizeof(struct hclge_mac_vlan_tbl_entry_cmd)); |
46a3df9f | 5477 | ret = hclge_cmd_send(&hdev->hw, mc_desc, 3); |
a90bb9a5 YL |
5478 | resp_code = (le32_to_cpu(mc_desc[0].data[0]) >> 8) & 0xff; |
5479 | retval = le16_to_cpu(mc_desc[0].retval); | |
5480 | ||
5481 | cfg_status = hclge_get_mac_vlan_cmd_status(vport, retval, | |
46a3df9f S |
5482 | resp_code, |
5483 | HCLGE_MAC_VLAN_ADD); | |
5484 | } | |
5485 | ||
5486 | if (ret) { | |
5487 | dev_err(&hdev->pdev->dev, | |
5488 | "add mac addr failed for cmd_send, ret =%d.\n", | |
5489 | ret); | |
5490 | return ret; | |
5491 | } | |
5492 | ||
5493 | return cfg_status; | |
5494 | } | |
5495 | ||
2da5ec58 JS |
5496 | static int hclge_init_umv_space(struct hclge_dev *hdev) |
5497 | { | |
5498 | u16 allocated_size = 0; | |
5499 | int ret; | |
5500 | ||
5501 | ret = hclge_set_umv_space(hdev, hdev->wanted_umv_size, &allocated_size, | |
5502 | true); | |
5503 | if (ret) | |
5504 | return ret; | |
5505 | ||
5506 | if (allocated_size < hdev->wanted_umv_size) | |
5507 | dev_warn(&hdev->pdev->dev, | |
5508 | "Alloc umv space failed, want %d, get %d\n", | |
5509 | hdev->wanted_umv_size, allocated_size); | |
5510 | ||
5511 | mutex_init(&hdev->umv_mutex); | |
5512 | hdev->max_umv_size = allocated_size; | |
5513 | hdev->priv_umv_size = hdev->max_umv_size / (hdev->num_req_vfs + 2); | |
5514 | hdev->share_umv_size = hdev->priv_umv_size + | |
5515 | hdev->max_umv_size % (hdev->num_req_vfs + 2); | |
5516 | ||
5517 | return 0; | |
5518 | } | |
5519 | ||
5520 | static int hclge_uninit_umv_space(struct hclge_dev *hdev) | |
5521 | { | |
5522 | int ret; | |
5523 | ||
5524 | if (hdev->max_umv_size > 0) { | |
5525 | ret = hclge_set_umv_space(hdev, hdev->max_umv_size, NULL, | |
5526 | false); | |
5527 | if (ret) | |
5528 | return ret; | |
5529 | hdev->max_umv_size = 0; | |
5530 | } | |
5531 | mutex_destroy(&hdev->umv_mutex); | |
5532 | ||
5533 | return 0; | |
5534 | } | |
5535 | ||
5536 | static int hclge_set_umv_space(struct hclge_dev *hdev, u16 space_size, | |
5537 | u16 *allocated_size, bool is_alloc) | |
5538 | { | |
5539 | struct hclge_umv_spc_alc_cmd *req; | |
5540 | struct hclge_desc desc; | |
5541 | int ret; | |
5542 | ||
5543 | req = (struct hclge_umv_spc_alc_cmd *)desc.data; | |
5544 | hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_ALLOCATE, false); | |
5545 | hnae3_set_bit(req->allocate, HCLGE_UMV_SPC_ALC_B, !is_alloc); | |
5546 | req->space_size = cpu_to_le32(space_size); | |
5547 | ||
5548 | ret = hclge_cmd_send(&hdev->hw, &desc, 1); | |
5549 | if (ret) { | |
5550 | dev_err(&hdev->pdev->dev, | |
5551 | "%s umv space failed for cmd_send, ret =%d\n", | |
5552 | is_alloc ? "allocate" : "free", ret); | |
5553 | return ret; | |
5554 | } | |
5555 | ||
5556 | if (is_alloc && allocated_size) | |
5557 | *allocated_size = le32_to_cpu(desc.data[1]); | |
5558 | ||
5559 | return 0; | |
5560 | } | |
5561 | ||
5562 | static void hclge_reset_umv_space(struct hclge_dev *hdev) | |
5563 | { | |
5564 | struct hclge_vport *vport; | |
5565 | int i; | |
5566 | ||
5567 | for (i = 0; i < hdev->num_alloc_vport; i++) { | |
5568 | vport = &hdev->vport[i]; | |
5569 | vport->used_umv_num = 0; | |
5570 | } | |
5571 | ||
5572 | mutex_lock(&hdev->umv_mutex); | |
5573 | hdev->share_umv_size = hdev->priv_umv_size + | |
5574 | hdev->max_umv_size % (hdev->num_req_vfs + 2); | |
5575 | mutex_unlock(&hdev->umv_mutex); | |
5576 | } | |
5577 | ||
5578 | static bool hclge_is_umv_space_full(struct hclge_vport *vport) | |
5579 | { | |
5580 | struct hclge_dev *hdev = vport->back; | |
5581 | bool is_full; | |
5582 | ||
5583 | mutex_lock(&hdev->umv_mutex); | |
5584 | is_full = (vport->used_umv_num >= hdev->priv_umv_size && | |
5585 | hdev->share_umv_size == 0); | |
5586 | mutex_unlock(&hdev->umv_mutex); | |
5587 | ||
5588 | return is_full; | |
5589 | } | |
5590 | ||
5591 | static void hclge_update_umv_space(struct hclge_vport *vport, bool is_free) | |
5592 | { | |
5593 | struct hclge_dev *hdev = vport->back; | |
5594 | ||
5595 | mutex_lock(&hdev->umv_mutex); | |
5596 | if (is_free) { | |
5597 | if (vport->used_umv_num > hdev->priv_umv_size) | |
5598 | hdev->share_umv_size++; | |
5599 | vport->used_umv_num--; | |
5600 | } else { | |
5601 | if (vport->used_umv_num >= hdev->priv_umv_size) | |
5602 | hdev->share_umv_size--; | |
5603 | vport->used_umv_num++; | |
5604 | } | |
5605 | mutex_unlock(&hdev->umv_mutex); | |
5606 | } | |
5607 | ||
46a3df9f S |
5608 | static int hclge_add_uc_addr(struct hnae3_handle *handle, |
5609 | const unsigned char *addr) | |
5610 | { | |
5611 | struct hclge_vport *vport = hclge_get_vport(handle); | |
5612 | ||
5613 | return hclge_add_uc_addr_common(vport, addr); | |
5614 | } | |
5615 | ||
5616 | int hclge_add_uc_addr_common(struct hclge_vport *vport, | |
5617 | const unsigned char *addr) | |
5618 | { | |
5619 | struct hclge_dev *hdev = vport->back; | |
d44f9b63 | 5620 | struct hclge_mac_vlan_tbl_entry_cmd req; |
bf88f41f | 5621 | struct hclge_desc desc; |
a90bb9a5 | 5622 | u16 egress_port = 0; |
04f0c72a | 5623 | int ret; |
46a3df9f S |
5624 | |
5625 | /* mac addr check */ | |
5626 | if (is_zero_ether_addr(addr) || | |
5627 | is_broadcast_ether_addr(addr) || | |
5628 | is_multicast_ether_addr(addr)) { | |
5629 | dev_err(&hdev->pdev->dev, | |
5630 | "Set_uc mac err! invalid mac:%pM. is_zero:%d,is_br=%d,is_mul=%d\n", | |
5631 | addr, | |
5632 | is_zero_ether_addr(addr), | |
5633 | is_broadcast_ether_addr(addr), | |
5634 | is_multicast_ether_addr(addr)); | |
5635 | return -EINVAL; | |
5636 | } | |
5637 | ||
5638 | memset(&req, 0, sizeof(req)); | |
ccc23ef3 | 5639 | hnae3_set_bit(req.flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1); |
a90bb9a5 | 5640 | |
ccc23ef3 PL |
5641 | hnae3_set_field(egress_port, HCLGE_MAC_EPORT_VFID_M, |
5642 | HCLGE_MAC_EPORT_VFID_S, vport->vport_id); | |
a90bb9a5 YL |
5643 | |
5644 | req.egress_port = cpu_to_le16(egress_port); | |
46a3df9f S |
5645 | |
5646 | hclge_prepare_mac_addr(&req, addr); | |
5647 | ||
bf88f41f JS |
5648 | /* Lookup the mac address in the mac_vlan table, and add |
5649 | * it if the entry is inexistent. Repeated unicast entry | |
5650 | * is not allowed in the mac vlan table. | |
5651 | */ | |
5652 | ret = hclge_lookup_mac_vlan_tbl(vport, &req, &desc, false); | |
2da5ec58 JS |
5653 | if (ret == -ENOENT) { |
5654 | if (!hclge_is_umv_space_full(vport)) { | |
5655 | ret = hclge_add_mac_vlan_tbl(vport, &req, NULL); | |
5656 | if (!ret) | |
5657 | hclge_update_umv_space(vport, false); | |
5658 | return ret; | |
5659 | } | |
5660 | ||
5661 | dev_err(&hdev->pdev->dev, "UC MAC table full(%u)\n", | |
5662 | hdev->priv_umv_size); | |
5663 | ||
5664 | return -ENOSPC; | |
5665 | } | |
bf88f41f JS |
5666 | |
5667 | /* check if we just hit the duplicate */ | |
5668 | if (!ret) | |
5669 | ret = -EINVAL; | |
5670 | ||
5671 | dev_err(&hdev->pdev->dev, | |
5672 | "PF failed to add unicast entry(%pM) in the MAC table\n", | |
5673 | addr); | |
46a3df9f | 5674 | |
04f0c72a | 5675 | return ret; |
46a3df9f S |
5676 | } |
5677 | ||
5678 | static int hclge_rm_uc_addr(struct hnae3_handle *handle, | |
5679 | const unsigned char *addr) | |
5680 | { | |
5681 | struct hclge_vport *vport = hclge_get_vport(handle); | |
5682 | ||
5683 | return hclge_rm_uc_addr_common(vport, addr); | |
5684 | } | |
5685 | ||
5686 | int hclge_rm_uc_addr_common(struct hclge_vport *vport, | |
5687 | const unsigned char *addr) | |
5688 | { | |
5689 | struct hclge_dev *hdev = vport->back; | |
d44f9b63 | 5690 | struct hclge_mac_vlan_tbl_entry_cmd req; |
04f0c72a | 5691 | int ret; |
46a3df9f S |
5692 | |
5693 | /* mac addr check */ | |
5694 | if (is_zero_ether_addr(addr) || | |
5695 | is_broadcast_ether_addr(addr) || | |
5696 | is_multicast_ether_addr(addr)) { | |
5697 | dev_dbg(&hdev->pdev->dev, | |
5698 | "Remove mac err! invalid mac:%pM.\n", | |
5699 | addr); | |
5700 | return -EINVAL; | |
5701 | } | |
5702 | ||
5703 | memset(&req, 0, sizeof(req)); | |
ccc23ef3 PL |
5704 | hnae3_set_bit(req.flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1); |
5705 | hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0); | |
46a3df9f | 5706 | hclge_prepare_mac_addr(&req, addr); |
04f0c72a | 5707 | ret = hclge_remove_mac_vlan_tbl(vport, &req); |
2da5ec58 JS |
5708 | if (!ret) |
5709 | hclge_update_umv_space(vport, true); | |
46a3df9f | 5710 | |
04f0c72a | 5711 | return ret; |
46a3df9f S |
5712 | } |
5713 | ||
5714 | static int hclge_add_mc_addr(struct hnae3_handle *handle, | |
5715 | const unsigned char *addr) | |
5716 | { | |
5717 | struct hclge_vport *vport = hclge_get_vport(handle); | |
5718 | ||
2bf8098b | 5719 | return hclge_add_mc_addr_common(vport, addr); |
46a3df9f S |
5720 | } |
5721 | ||
5722 | int hclge_add_mc_addr_common(struct hclge_vport *vport, | |
5723 | const unsigned char *addr) | |
5724 | { | |
5725 | struct hclge_dev *hdev = vport->back; | |
d44f9b63 | 5726 | struct hclge_mac_vlan_tbl_entry_cmd req; |
46a3df9f | 5727 | struct hclge_desc desc[3]; |
46a3df9f S |
5728 | int status; |
5729 | ||
5730 | /* mac addr check */ | |
5731 | if (!is_multicast_ether_addr(addr)) { | |
5732 | dev_err(&hdev->pdev->dev, | |
5733 | "Add mc mac err! invalid mac:%pM.\n", | |
5734 | addr); | |
5735 | return -EINVAL; | |
5736 | } | |
5737 | memset(&req, 0, sizeof(req)); | |
ccc23ef3 PL |
5738 | hnae3_set_bit(req.flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1); |
5739 | hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0); | |
5740 | hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT1_EN_B, 1); | |
738a3401 | 5741 | hnae3_set_bit(req.mc_mac_en, HCLGE_MAC_VLAN_BIT0_EN_B, 1); |
46a3df9f S |
5742 | hclge_prepare_mac_addr(&req, addr); |
5743 | status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true); | |
5744 | if (!status) { | |
5745 | /* This mac addr exist, update VFID for it */ | |
5746 | hclge_update_desc_vfid(desc, vport->vport_id, false); | |
5747 | status = hclge_add_mac_vlan_tbl(vport, &req, desc); | |
5748 | } else { | |
5749 | /* This mac addr do not exist, add new entry for it */ | |
5750 | memset(desc[0].data, 0, sizeof(desc[0].data)); | |
5751 | memset(desc[1].data, 0, sizeof(desc[0].data)); | |
5752 | memset(desc[2].data, 0, sizeof(desc[0].data)); | |
5753 | hclge_update_desc_vfid(desc, vport->vport_id, false); | |
5754 | status = hclge_add_mac_vlan_tbl(vport, &req, desc); | |
5755 | } | |
5756 | ||
55b049be JS |
5757 | if (status == -ENOSPC) |
5758 | dev_err(&hdev->pdev->dev, "mc mac vlan table is full\n"); | |
46a3df9f S |
5759 | |
5760 | return status; | |
5761 | } | |
5762 | ||
5763 | static int hclge_rm_mc_addr(struct hnae3_handle *handle, | |
5764 | const unsigned char *addr) | |
5765 | { | |
5766 | struct hclge_vport *vport = hclge_get_vport(handle); | |
5767 | ||
5768 | return hclge_rm_mc_addr_common(vport, addr); | |
5769 | } | |
5770 | ||
5771 | int hclge_rm_mc_addr_common(struct hclge_vport *vport, | |
5772 | const unsigned char *addr) | |
5773 | { | |
5774 | struct hclge_dev *hdev = vport->back; | |
d44f9b63 | 5775 | struct hclge_mac_vlan_tbl_entry_cmd req; |
46a3df9f S |
5776 | enum hclge_cmd_status status; |
5777 | struct hclge_desc desc[3]; | |
46a3df9f S |
5778 | |
5779 | /* mac addr check */ | |
5780 | if (!is_multicast_ether_addr(addr)) { | |
5781 | dev_dbg(&hdev->pdev->dev, | |
5782 | "Remove mc mac err! invalid mac:%pM.\n", | |
5783 | addr); | |
5784 | return -EINVAL; | |
5785 | } | |
5786 | ||
5787 | memset(&req, 0, sizeof(req)); | |
ccc23ef3 PL |
5788 | hnae3_set_bit(req.flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1); |
5789 | hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0); | |
5790 | hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT1_EN_B, 1); | |
738a3401 | 5791 | hnae3_set_bit(req.mc_mac_en, HCLGE_MAC_VLAN_BIT0_EN_B, 1); |
46a3df9f S |
5792 | hclge_prepare_mac_addr(&req, addr); |
5793 | status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true); | |
5794 | if (!status) { | |
5795 | /* This mac addr exist, remove this handle's VFID for it */ | |
5796 | hclge_update_desc_vfid(desc, vport->vport_id, true); | |
5797 | ||
5798 | if (hclge_is_all_function_id_zero(desc)) | |
5799 | /* All the vfid is zero, so need to delete this entry */ | |
5800 | status = hclge_remove_mac_vlan_tbl(vport, &req); | |
5801 | else | |
5802 | /* Not all the vfid is zero, update the vfid */ | |
5803 | status = hclge_add_mac_vlan_tbl(vport, &req, desc); | |
5804 | ||
5805 | } else { | |
a832d8b5 XW |
5806 | /* Maybe this mac address is in mta table, but it cannot be |
5807 | * deleted here because an entry of mta represents an address | |
5808 | * range rather than a specific address. the delete action to | |
5809 | * all entries will take effect in update_mta_status called by | |
5810 | * hns3_nic_set_rx_mode. | |
5811 | */ | |
5812 | status = 0; | |
46a3df9f S |
5813 | } |
5814 | ||
46a3df9f S |
5815 | return status; |
5816 | } | |
5817 | ||
635bfb58 FL |
5818 | static int hclge_get_mac_ethertype_cmd_status(struct hclge_dev *hdev, |
5819 | u16 cmdq_resp, u8 resp_code) | |
5820 | { | |
5821 | #define HCLGE_ETHERTYPE_SUCCESS_ADD 0 | |
5822 | #define HCLGE_ETHERTYPE_ALREADY_ADD 1 | |
5823 | #define HCLGE_ETHERTYPE_MGR_TBL_OVERFLOW 2 | |
5824 | #define HCLGE_ETHERTYPE_KEY_CONFLICT 3 | |
5825 | ||
5826 | int return_status; | |
5827 | ||
5828 | if (cmdq_resp) { | |
5829 | dev_err(&hdev->pdev->dev, | |
5830 | "cmdq execute failed for get_mac_ethertype_cmd_status, status=%d.\n", | |
5831 | cmdq_resp); | |
5832 | return -EIO; | |
5833 | } | |
5834 | ||
5835 | switch (resp_code) { | |
5836 | case HCLGE_ETHERTYPE_SUCCESS_ADD: | |
5837 | case HCLGE_ETHERTYPE_ALREADY_ADD: | |
5838 | return_status = 0; | |
5839 | break; | |
5840 | case HCLGE_ETHERTYPE_MGR_TBL_OVERFLOW: | |
5841 | dev_err(&hdev->pdev->dev, | |
5842 | "add mac ethertype failed for manager table overflow.\n"); | |
5843 | return_status = -EIO; | |
5844 | break; | |
5845 | case HCLGE_ETHERTYPE_KEY_CONFLICT: | |
5846 | dev_err(&hdev->pdev->dev, | |
5847 | "add mac ethertype failed for key conflict.\n"); | |
5848 | return_status = -EIO; | |
5849 | break; | |
5850 | default: | |
5851 | dev_err(&hdev->pdev->dev, | |
5852 | "add mac ethertype failed for undefined, code=%d.\n", | |
5853 | resp_code); | |
5854 | return_status = -EIO; | |
5855 | } | |
5856 | ||
5857 | return return_status; | |
5858 | } | |
5859 | ||
5860 | static int hclge_add_mgr_tbl(struct hclge_dev *hdev, | |
5861 | const struct hclge_mac_mgr_tbl_entry_cmd *req) | |
5862 | { | |
5863 | struct hclge_desc desc; | |
5864 | u8 resp_code; | |
5865 | u16 retval; | |
5866 | int ret; | |
5867 | ||
5868 | hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_ETHTYPE_ADD, false); | |
5869 | memcpy(desc.data, req, sizeof(struct hclge_mac_mgr_tbl_entry_cmd)); | |
5870 | ||
5871 | ret = hclge_cmd_send(&hdev->hw, &desc, 1); | |
5872 | if (ret) { | |
5873 | dev_err(&hdev->pdev->dev, | |
5874 | "add mac ethertype failed for cmd_send, ret =%d.\n", | |
5875 | ret); | |
5876 | return ret; | |
5877 | } | |
5878 | ||
5879 | resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff; | |
5880 | retval = le16_to_cpu(desc.retval); | |
5881 | ||
5882 | return hclge_get_mac_ethertype_cmd_status(hdev, retval, resp_code); | |
5883 | } | |
5884 | ||
5885 | static int init_mgr_tbl(struct hclge_dev *hdev) | |
5886 | { | |
5887 | int ret; | |
5888 | int i; | |
5889 | ||
5890 | for (i = 0; i < ARRAY_SIZE(hclge_mgr_table); i++) { | |
5891 | ret = hclge_add_mgr_tbl(hdev, &hclge_mgr_table[i]); | |
5892 | if (ret) { | |
5893 | dev_err(&hdev->pdev->dev, | |
5894 | "add mac ethertype failed, ret =%d.\n", | |
5895 | ret); | |
5896 | return ret; | |
5897 | } | |
5898 | } | |
5899 | ||
5900 | return 0; | |
5901 | } | |
5902 | ||
46a3df9f S |
5903 | static void hclge_get_mac_addr(struct hnae3_handle *handle, u8 *p) |
5904 | { | |
5905 | struct hclge_vport *vport = hclge_get_vport(handle); | |
5906 | struct hclge_dev *hdev = vport->back; | |
5907 | ||
5908 | ether_addr_copy(p, hdev->hw.mac.mac_addr); | |
5909 | } | |
5910 | ||
3cbf5e2d FL |
5911 | static int hclge_set_mac_addr(struct hnae3_handle *handle, void *p, |
5912 | bool is_first) | |
46a3df9f S |
5913 | { |
5914 | const unsigned char *new_addr = (const unsigned char *)p; | |
5915 | struct hclge_vport *vport = hclge_get_vport(handle); | |
5916 | struct hclge_dev *hdev = vport->back; | |
20a5c4c0 | 5917 | int ret; |
46a3df9f S |
5918 | |
5919 | /* mac addr check */ | |
5920 | if (is_zero_ether_addr(new_addr) || | |
5921 | is_broadcast_ether_addr(new_addr) || | |
5922 | is_multicast_ether_addr(new_addr)) { | |
5923 | dev_err(&hdev->pdev->dev, | |
5924 | "Change uc mac err! invalid mac:%p.\n", | |
5925 | new_addr); | |
5926 | return -EINVAL; | |
5927 | } | |
5928 | ||
3cbf5e2d | 5929 | if (!is_first && hclge_rm_uc_addr(handle, hdev->hw.mac.mac_addr)) |
20a5c4c0 | 5930 | dev_warn(&hdev->pdev->dev, |
3cbf5e2d | 5931 | "remove old uc mac address fail.\n"); |
46a3df9f | 5932 | |
20a5c4c0 FL |
5933 | ret = hclge_add_uc_addr(handle, new_addr); |
5934 | if (ret) { | |
5935 | dev_err(&hdev->pdev->dev, | |
5936 | "add uc mac address fail, ret =%d.\n", | |
5937 | ret); | |
5938 | ||
3cbf5e2d FL |
5939 | if (!is_first && |
5940 | hclge_add_uc_addr(handle, hdev->hw.mac.mac_addr)) | |
20a5c4c0 | 5941 | dev_err(&hdev->pdev->dev, |
3cbf5e2d | 5942 | "restore uc mac address fail.\n"); |
20a5c4c0 FL |
5943 | |
5944 | return -EIO; | |
46a3df9f S |
5945 | } |
5946 | ||
532fdd5e | 5947 | ret = hclge_pause_addr_cfg(hdev, new_addr); |
20a5c4c0 FL |
5948 | if (ret) { |
5949 | dev_err(&hdev->pdev->dev, | |
5950 | "configure mac pause address fail, ret =%d.\n", | |
5951 | ret); | |
5952 | return -EIO; | |
5953 | } | |
5954 | ||
5955 | ether_addr_copy(hdev->hw.mac.mac_addr, new_addr); | |
5956 | ||
5957 | return 0; | |
46a3df9f S |
5958 | } |
5959 | ||
a185d723 XW |
5960 | static int hclge_do_ioctl(struct hnae3_handle *handle, struct ifreq *ifr, |
5961 | int cmd) | |
5962 | { | |
5963 | struct hclge_vport *vport = hclge_get_vport(handle); | |
5964 | struct hclge_dev *hdev = vport->back; | |
5965 | ||
5966 | if (!hdev->hw.mac.phydev) | |
5967 | return -EOPNOTSUPP; | |
5968 | ||
5969 | return phy_mii_ioctl(hdev->hw.mac.phydev, ifr, cmd); | |
5970 | } | |
5971 | ||
46a3df9f | 5972 | static int hclge_set_vlan_filter_ctrl(struct hclge_dev *hdev, u8 vlan_type, |
0e44d430 | 5973 | u8 fe_type, bool filter_en) |
46a3df9f | 5974 | { |
d44f9b63 | 5975 | struct hclge_vlan_filter_ctrl_cmd *req; |
46a3df9f S |
5976 | struct hclge_desc desc; |
5977 | int ret; | |
5978 | ||
5979 | hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_FILTER_CTRL, false); | |
5980 | ||
d44f9b63 | 5981 | req = (struct hclge_vlan_filter_ctrl_cmd *)desc.data; |
46a3df9f | 5982 | req->vlan_type = vlan_type; |
0e44d430 | 5983 | req->vlan_fe = filter_en ? fe_type : 0; |
46a3df9f S |
5984 | |
5985 | ret = hclge_cmd_send(&hdev->hw, &desc, 1); | |
90415e85 | 5986 | if (ret) |
46a3df9f S |
5987 | dev_err(&hdev->pdev->dev, "set vlan filter fail, ret =%d.\n", |
5988 | ret); | |
46a3df9f | 5989 | |
90415e85 | 5990 | return ret; |
46a3df9f S |
5991 | } |
5992 | ||
d818396d JS |
5993 | #define HCLGE_FILTER_TYPE_VF 0 |
5994 | #define HCLGE_FILTER_TYPE_PORT 1 | |
0e44d430 ZL |
5995 | #define HCLGE_FILTER_FE_EGRESS_V1_B BIT(0) |
5996 | #define HCLGE_FILTER_FE_NIC_INGRESS_B BIT(0) | |
5997 | #define HCLGE_FILTER_FE_NIC_EGRESS_B BIT(1) | |
5998 | #define HCLGE_FILTER_FE_ROCE_INGRESS_B BIT(2) | |
5999 | #define HCLGE_FILTER_FE_ROCE_EGRESS_B BIT(3) | |
6000 | #define HCLGE_FILTER_FE_EGRESS (HCLGE_FILTER_FE_NIC_EGRESS_B \ | |
6001 | | HCLGE_FILTER_FE_ROCE_EGRESS_B) | |
6002 | #define HCLGE_FILTER_FE_INGRESS (HCLGE_FILTER_FE_NIC_INGRESS_B \ | |
6003 | | HCLGE_FILTER_FE_ROCE_INGRESS_B) | |
d818396d JS |
6004 | |
6005 | static void hclge_enable_vlan_filter(struct hnae3_handle *handle, bool enable) | |
6006 | { | |
6007 | struct hclge_vport *vport = hclge_get_vport(handle); | |
6008 | struct hclge_dev *hdev = vport->back; | |
6009 | ||
0e44d430 ZL |
6010 | if (hdev->pdev->revision >= 0x21) { |
6011 | hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF, | |
6012 | HCLGE_FILTER_FE_EGRESS, enable); | |
6013 | hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_PORT, | |
6014 | HCLGE_FILTER_FE_INGRESS, enable); | |
6015 | } else { | |
6016 | hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF, | |
6017 | HCLGE_FILTER_FE_EGRESS_V1_B, enable); | |
6018 | } | |
1e3653db JS |
6019 | if (enable) |
6020 | handle->netdev_flags |= HNAE3_VLAN_FLTR; | |
6021 | else | |
6022 | handle->netdev_flags &= ~HNAE3_VLAN_FLTR; | |
d818396d JS |
6023 | } |
6024 | ||
4e66632d YL |
6025 | static int hclge_set_vf_vlan_common(struct hclge_dev *hdev, int vfid, |
6026 | bool is_kill, u16 vlan, u8 qos, | |
6027 | __be16 proto) | |
46a3df9f S |
6028 | { |
6029 | #define HCLGE_MAX_VF_BYTES 16 | |
d44f9b63 YL |
6030 | struct hclge_vlan_filter_vf_cfg_cmd *req0; |
6031 | struct hclge_vlan_filter_vf_cfg_cmd *req1; | |
46a3df9f S |
6032 | struct hclge_desc desc[2]; |
6033 | u8 vf_byte_val; | |
6034 | u8 vf_byte_off; | |
6035 | int ret; | |
6036 | ||
6037 | hclge_cmd_setup_basic_desc(&desc[0], | |
6038 | HCLGE_OPC_VLAN_FILTER_VF_CFG, false); | |
6039 | hclge_cmd_setup_basic_desc(&desc[1], | |
6040 | HCLGE_OPC_VLAN_FILTER_VF_CFG, false); | |
6041 | ||
6042 | desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT); | |
6043 | ||
6044 | vf_byte_off = vfid / 8; | |
6045 | vf_byte_val = 1 << (vfid % 8); | |
6046 | ||
d44f9b63 YL |
6047 | req0 = (struct hclge_vlan_filter_vf_cfg_cmd *)desc[0].data; |
6048 | req1 = (struct hclge_vlan_filter_vf_cfg_cmd *)desc[1].data; | |
46a3df9f | 6049 | |
a90bb9a5 | 6050 | req0->vlan_id = cpu_to_le16(vlan); |
46a3df9f S |
6051 | req0->vlan_cfg = is_kill; |
6052 | ||
6053 | if (vf_byte_off < HCLGE_MAX_VF_BYTES) | |
6054 | req0->vf_bitmap[vf_byte_off] = vf_byte_val; | |
6055 | else | |
6056 | req1->vf_bitmap[vf_byte_off - HCLGE_MAX_VF_BYTES] = vf_byte_val; | |
6057 | ||
6058 | ret = hclge_cmd_send(&hdev->hw, desc, 2); | |
6059 | if (ret) { | |
6060 | dev_err(&hdev->pdev->dev, | |
6061 | "Send vf vlan command fail, ret =%d.\n", | |
6062 | ret); | |
6063 | return ret; | |
6064 | } | |
6065 | ||
6066 | if (!is_kill) { | |
715d610d | 6067 | #define HCLGE_VF_VLAN_NO_ENTRY 2 |
46a3df9f S |
6068 | if (!req0->resp_code || req0->resp_code == 1) |
6069 | return 0; | |
6070 | ||
715d610d YL |
6071 | if (req0->resp_code == HCLGE_VF_VLAN_NO_ENTRY) { |
6072 | dev_warn(&hdev->pdev->dev, | |
6073 | "vf vlan table is full, vf vlan filter is disabled\n"); | |
6074 | return 0; | |
6075 | } | |
6076 | ||
46a3df9f S |
6077 | dev_err(&hdev->pdev->dev, |
6078 | "Add vf vlan filter fail, ret =%d.\n", | |
6079 | req0->resp_code); | |
6080 | } else { | |
29d3a843 | 6081 | #define HCLGE_VF_VLAN_DEL_NO_FOUND 1 |
46a3df9f S |
6082 | if (!req0->resp_code) |
6083 | return 0; | |
6084 | ||
29d3a843 YL |
6085 | if (req0->resp_code == HCLGE_VF_VLAN_DEL_NO_FOUND) { |
6086 | dev_warn(&hdev->pdev->dev, | |
6087 | "vlan %d filter is not in vf vlan table\n", | |
6088 | vlan); | |
6089 | return 0; | |
6090 | } | |
6091 | ||
46a3df9f S |
6092 | dev_err(&hdev->pdev->dev, |
6093 | "Kill vf vlan filter fail, ret =%d.\n", | |
6094 | req0->resp_code); | |
6095 | } | |
6096 | ||
6097 | return -EIO; | |
6098 | } | |
6099 | ||
4e66632d YL |
6100 | static int hclge_set_port_vlan_filter(struct hclge_dev *hdev, __be16 proto, |
6101 | u16 vlan_id, bool is_kill) | |
46a3df9f | 6102 | { |
d44f9b63 | 6103 | struct hclge_vlan_filter_pf_cfg_cmd *req; |
46a3df9f S |
6104 | struct hclge_desc desc; |
6105 | u8 vlan_offset_byte_val; | |
6106 | u8 vlan_offset_byte; | |
6107 | u8 vlan_offset_160; | |
6108 | int ret; | |
6109 | ||
6110 | hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_FILTER_PF_CFG, false); | |
6111 | ||
6112 | vlan_offset_160 = vlan_id / 160; | |
6113 | vlan_offset_byte = (vlan_id % 160) / 8; | |
6114 | vlan_offset_byte_val = 1 << (vlan_id % 8); | |
6115 | ||
d44f9b63 | 6116 | req = (struct hclge_vlan_filter_pf_cfg_cmd *)desc.data; |
46a3df9f S |
6117 | req->vlan_offset = vlan_offset_160; |
6118 | req->vlan_cfg = is_kill; | |
6119 | req->vlan_offset_bitmap[vlan_offset_byte] = vlan_offset_byte_val; | |
6120 | ||
6121 | ret = hclge_cmd_send(&hdev->hw, &desc, 1); | |
4e66632d YL |
6122 | if (ret) |
6123 | dev_err(&hdev->pdev->dev, | |
6124 | "port vlan command, send fail, ret =%d.\n", ret); | |
6125 | return ret; | |
6126 | } | |
6127 | ||
6128 | static int hclge_set_vlan_filter_hw(struct hclge_dev *hdev, __be16 proto, | |
6129 | u16 vport_id, u16 vlan_id, u8 qos, | |
6130 | bool is_kill) | |
6131 | { | |
6132 | u16 vport_idx, vport_num = 0; | |
6133 | int ret; | |
6134 | ||
4935129c YL |
6135 | if (is_kill && !vlan_id) |
6136 | return 0; | |
6137 | ||
4e66632d YL |
6138 | ret = hclge_set_vf_vlan_common(hdev, vport_id, is_kill, vlan_id, |
6139 | 0, proto); | |
46a3df9f S |
6140 | if (ret) { |
6141 | dev_err(&hdev->pdev->dev, | |
4e66632d YL |
6142 | "Set %d vport vlan filter config fail, ret =%d.\n", |
6143 | vport_id, ret); | |
46a3df9f S |
6144 | return ret; |
6145 | } | |
6146 | ||
4e66632d YL |
6147 | /* vlan 0 may be added twice when 8021q module is enabled */ |
6148 | if (!is_kill && !vlan_id && | |
6149 | test_bit(vport_id, hdev->vlan_table[vlan_id])) | |
6150 | return 0; | |
6151 | ||
6152 | if (!is_kill && test_and_set_bit(vport_id, hdev->vlan_table[vlan_id])) { | |
46a3df9f | 6153 | dev_err(&hdev->pdev->dev, |
4e66632d YL |
6154 | "Add port vlan failed, vport %d is already in vlan %d\n", |
6155 | vport_id, vlan_id); | |
6156 | return -EINVAL; | |
46a3df9f S |
6157 | } |
6158 | ||
4e66632d YL |
6159 | if (is_kill && |
6160 | !test_and_clear_bit(vport_id, hdev->vlan_table[vlan_id])) { | |
6161 | dev_err(&hdev->pdev->dev, | |
6162 | "Delete port vlan failed, vport %d is not in vlan %d\n", | |
6163 | vport_id, vlan_id); | |
6164 | return -EINVAL; | |
6165 | } | |
6166 | ||
3c6d4f43 | 6167 | for_each_set_bit(vport_idx, hdev->vlan_table[vlan_id], HCLGE_VPORT_NUM) |
4e66632d YL |
6168 | vport_num++; |
6169 | ||
6170 | if ((is_kill && vport_num == 0) || (!is_kill && vport_num == 1)) | |
6171 | ret = hclge_set_port_vlan_filter(hdev, proto, vlan_id, | |
6172 | is_kill); | |
6173 | ||
6174 | return ret; | |
6175 | } | |
6176 | ||
6177 | int hclge_set_vlan_filter(struct hnae3_handle *handle, __be16 proto, | |
6178 | u16 vlan_id, bool is_kill) | |
6179 | { | |
6180 | struct hclge_vport *vport = hclge_get_vport(handle); | |
6181 | struct hclge_dev *hdev = vport->back; | |
6182 | ||
6183 | return hclge_set_vlan_filter_hw(hdev, proto, vport->vport_id, vlan_id, | |
6184 | 0, is_kill); | |
46a3df9f S |
6185 | } |
6186 | ||
6187 | static int hclge_set_vf_vlan_filter(struct hnae3_handle *handle, int vfid, | |
6188 | u16 vlan, u8 qos, __be16 proto) | |
6189 | { | |
6190 | struct hclge_vport *vport = hclge_get_vport(handle); | |
6191 | struct hclge_dev *hdev = vport->back; | |
6192 | ||
6193 | if ((vfid >= hdev->num_alloc_vfs) || (vlan > 4095) || (qos > 7)) | |
6194 | return -EINVAL; | |
6195 | if (proto != htons(ETH_P_8021Q)) | |
6196 | return -EPROTONOSUPPORT; | |
6197 | ||
4e66632d | 6198 | return hclge_set_vlan_filter_hw(hdev, proto, vfid, vlan, qos, false); |
46a3df9f S |
6199 | } |
6200 | ||
e62f2a6b PL |
6201 | static int hclge_set_vlan_tx_offload_cfg(struct hclge_vport *vport) |
6202 | { | |
6203 | struct hclge_tx_vtag_cfg *vcfg = &vport->txvlan_cfg; | |
6204 | struct hclge_vport_vtag_tx_cfg_cmd *req; | |
6205 | struct hclge_dev *hdev = vport->back; | |
6206 | struct hclge_desc desc; | |
6207 | int status; | |
6208 | ||
6209 | hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_PORT_TX_CFG, false); | |
6210 | ||
6211 | req = (struct hclge_vport_vtag_tx_cfg_cmd *)desc.data; | |
6212 | req->def_vlan_tag1 = cpu_to_le16(vcfg->default_tag1); | |
6213 | req->def_vlan_tag2 = cpu_to_le16(vcfg->default_tag2); | |
ccc23ef3 PL |
6214 | hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_TAG1_B, |
6215 | vcfg->accept_tag1 ? 1 : 0); | |
6216 | hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_UNTAG1_B, | |
6217 | vcfg->accept_untag1 ? 1 : 0); | |
6218 | hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_TAG2_B, | |
6219 | vcfg->accept_tag2 ? 1 : 0); | |
6220 | hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_UNTAG2_B, | |
6221 | vcfg->accept_untag2 ? 1 : 0); | |
6222 | hnae3_set_bit(req->vport_vlan_cfg, HCLGE_PORT_INS_TAG1_EN_B, | |
6223 | vcfg->insert_tag1_en ? 1 : 0); | |
6224 | hnae3_set_bit(req->vport_vlan_cfg, HCLGE_PORT_INS_TAG2_EN_B, | |
6225 | vcfg->insert_tag2_en ? 1 : 0); | |
6226 | hnae3_set_bit(req->vport_vlan_cfg, HCLGE_CFG_NIC_ROCE_SEL_B, 0); | |
e62f2a6b PL |
6227 | |
6228 | req->vf_offset = vport->vport_id / HCLGE_VF_NUM_PER_CMD; | |
6229 | req->vf_bitmap[req->vf_offset] = | |
6230 | 1 << (vport->vport_id % HCLGE_VF_NUM_PER_BYTE); | |
6231 | ||
6232 | status = hclge_cmd_send(&hdev->hw, &desc, 1); | |
6233 | if (status) | |
6234 | dev_err(&hdev->pdev->dev, | |
6235 | "Send port txvlan cfg command fail, ret =%d\n", | |
6236 | status); | |
6237 | ||
6238 | return status; | |
6239 | } | |
6240 | ||
6241 | static int hclge_set_vlan_rx_offload_cfg(struct hclge_vport *vport) | |
6242 | { | |
6243 | struct hclge_rx_vtag_cfg *vcfg = &vport->rxvlan_cfg; | |
6244 | struct hclge_vport_vtag_rx_cfg_cmd *req; | |
6245 | struct hclge_dev *hdev = vport->back; | |
6246 | struct hclge_desc desc; | |
6247 | int status; | |
6248 | ||
6249 | hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_PORT_RX_CFG, false); | |
6250 | ||
6251 | req = (struct hclge_vport_vtag_rx_cfg_cmd *)desc.data; | |
ccc23ef3 PL |
6252 | hnae3_set_bit(req->vport_vlan_cfg, HCLGE_REM_TAG1_EN_B, |
6253 | vcfg->strip_tag1_en ? 1 : 0); | |
6254 | hnae3_set_bit(req->vport_vlan_cfg, HCLGE_REM_TAG2_EN_B, | |
6255 | vcfg->strip_tag2_en ? 1 : 0); | |
6256 | hnae3_set_bit(req->vport_vlan_cfg, HCLGE_SHOW_TAG1_EN_B, | |
6257 | vcfg->vlan1_vlan_prionly ? 1 : 0); | |
6258 | hnae3_set_bit(req->vport_vlan_cfg, HCLGE_SHOW_TAG2_EN_B, | |
6259 | vcfg->vlan2_vlan_prionly ? 1 : 0); | |
e62f2a6b PL |
6260 | |
6261 | req->vf_offset = vport->vport_id / HCLGE_VF_NUM_PER_CMD; | |
6262 | req->vf_bitmap[req->vf_offset] = | |
6263 | 1 << (vport->vport_id % HCLGE_VF_NUM_PER_BYTE); | |
6264 | ||
6265 | status = hclge_cmd_send(&hdev->hw, &desc, 1); | |
6266 | if (status) | |
6267 | dev_err(&hdev->pdev->dev, | |
6268 | "Send port rxvlan cfg command fail, ret =%d\n", | |
6269 | status); | |
6270 | ||
6271 | return status; | |
6272 | } | |
6273 | ||
6274 | static int hclge_set_vlan_protocol_type(struct hclge_dev *hdev) | |
6275 | { | |
6276 | struct hclge_rx_vlan_type_cfg_cmd *rx_req; | |
6277 | struct hclge_tx_vlan_type_cfg_cmd *tx_req; | |
6278 | struct hclge_desc desc; | |
6279 | int status; | |
6280 | ||
6281 | hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_TYPE_ID, false); | |
6282 | rx_req = (struct hclge_rx_vlan_type_cfg_cmd *)desc.data; | |
6283 | rx_req->ot_fst_vlan_type = | |
6284 | cpu_to_le16(hdev->vlan_type_cfg.rx_ot_fst_vlan_type); | |
6285 | rx_req->ot_sec_vlan_type = | |
6286 | cpu_to_le16(hdev->vlan_type_cfg.rx_ot_sec_vlan_type); | |
6287 | rx_req->in_fst_vlan_type = | |
6288 | cpu_to_le16(hdev->vlan_type_cfg.rx_in_fst_vlan_type); | |
6289 | rx_req->in_sec_vlan_type = | |
6290 | cpu_to_le16(hdev->vlan_type_cfg.rx_in_sec_vlan_type); | |
6291 | ||
6292 | status = hclge_cmd_send(&hdev->hw, &desc, 1); | |
6293 | if (status) { | |
6294 | dev_err(&hdev->pdev->dev, | |
6295 | "Send rxvlan protocol type command fail, ret =%d\n", | |
6296 | status); | |
6297 | return status; | |
6298 | } | |
6299 | ||
6300 | hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_INSERT, false); | |
6301 | ||
855f03fb | 6302 | tx_req = (struct hclge_tx_vlan_type_cfg_cmd *)desc.data; |
e62f2a6b PL |
6303 | tx_req->ot_vlan_type = cpu_to_le16(hdev->vlan_type_cfg.tx_ot_vlan_type); |
6304 | tx_req->in_vlan_type = cpu_to_le16(hdev->vlan_type_cfg.tx_in_vlan_type); | |
6305 | ||
6306 | status = hclge_cmd_send(&hdev->hw, &desc, 1); | |
6307 | if (status) | |
6308 | dev_err(&hdev->pdev->dev, | |
6309 | "Send txvlan protocol type command fail, ret =%d\n", | |
6310 | status); | |
6311 | ||
6312 | return status; | |
6313 | } | |
6314 | ||
46a3df9f S |
6315 | static int hclge_init_vlan_config(struct hclge_dev *hdev) |
6316 | { | |
e62f2a6b PL |
6317 | #define HCLGE_DEF_VLAN_TYPE 0x8100 |
6318 | ||
1e3653db | 6319 | struct hnae3_handle *handle = &hdev->vport[0].nic; |
e62f2a6b | 6320 | struct hclge_vport *vport; |
46a3df9f | 6321 | int ret; |
e62f2a6b PL |
6322 | int i; |
6323 | ||
0e44d430 ZL |
6324 | if (hdev->pdev->revision >= 0x21) { |
6325 | ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF, | |
6326 | HCLGE_FILTER_FE_EGRESS, true); | |
6327 | if (ret) | |
6328 | return ret; | |
46a3df9f | 6329 | |
0e44d430 ZL |
6330 | ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_PORT, |
6331 | HCLGE_FILTER_FE_INGRESS, true); | |
6332 | if (ret) | |
6333 | return ret; | |
6334 | } else { | |
6335 | ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF, | |
6336 | HCLGE_FILTER_FE_EGRESS_V1_B, | |
6337 | true); | |
6338 | if (ret) | |
6339 | return ret; | |
6340 | } | |
46a3df9f | 6341 | |
1e3653db JS |
6342 | handle->netdev_flags |= HNAE3_VLAN_FLTR; |
6343 | ||
e62f2a6b PL |
6344 | hdev->vlan_type_cfg.rx_in_fst_vlan_type = HCLGE_DEF_VLAN_TYPE; |
6345 | hdev->vlan_type_cfg.rx_in_sec_vlan_type = HCLGE_DEF_VLAN_TYPE; | |
6346 | hdev->vlan_type_cfg.rx_ot_fst_vlan_type = HCLGE_DEF_VLAN_TYPE; | |
6347 | hdev->vlan_type_cfg.rx_ot_sec_vlan_type = HCLGE_DEF_VLAN_TYPE; | |
6348 | hdev->vlan_type_cfg.tx_ot_vlan_type = HCLGE_DEF_VLAN_TYPE; | |
6349 | hdev->vlan_type_cfg.tx_in_vlan_type = HCLGE_DEF_VLAN_TYPE; | |
6350 | ||
6351 | ret = hclge_set_vlan_protocol_type(hdev); | |
5e43aef8 L |
6352 | if (ret) |
6353 | return ret; | |
46a3df9f | 6354 | |
e62f2a6b PL |
6355 | for (i = 0; i < hdev->num_alloc_vport; i++) { |
6356 | vport = &hdev->vport[i]; | |
b75b1a56 PL |
6357 | vport->txvlan_cfg.accept_tag1 = true; |
6358 | vport->txvlan_cfg.accept_untag1 = true; | |
6359 | ||
6360 | /* accept_tag2 and accept_untag2 are not supported on | |
6361 | * pdev revision(0x20), new revision support them. The | |
6362 | * value of this two fields will not return error when driver | |
6363 | * send command to fireware in revision(0x20). | |
6364 | * This two fields can not configured by user. | |
6365 | */ | |
6366 | vport->txvlan_cfg.accept_tag2 = true; | |
6367 | vport->txvlan_cfg.accept_untag2 = true; | |
6368 | ||
e62f2a6b PL |
6369 | vport->txvlan_cfg.insert_tag1_en = false; |
6370 | vport->txvlan_cfg.insert_tag2_en = false; | |
6371 | vport->txvlan_cfg.default_tag1 = 0; | |
6372 | vport->txvlan_cfg.default_tag2 = 0; | |
6373 | ||
6374 | ret = hclge_set_vlan_tx_offload_cfg(vport); | |
6375 | if (ret) | |
6376 | return ret; | |
6377 | ||
6378 | vport->rxvlan_cfg.strip_tag1_en = false; | |
6379 | vport->rxvlan_cfg.strip_tag2_en = true; | |
6380 | vport->rxvlan_cfg.vlan1_vlan_prionly = false; | |
6381 | vport->rxvlan_cfg.vlan2_vlan_prionly = false; | |
6382 | ||
6383 | ret = hclge_set_vlan_rx_offload_cfg(vport); | |
6384 | if (ret) | |
6385 | return ret; | |
6386 | } | |
6387 | ||
4e66632d | 6388 | return hclge_set_vlan_filter(handle, htons(ETH_P_8021Q), 0, false); |
46a3df9f S |
6389 | } |
6390 | ||
3849d494 | 6391 | int hclge_en_hw_strip_rxvtag(struct hnae3_handle *handle, bool enable) |
5f9a7732 PL |
6392 | { |
6393 | struct hclge_vport *vport = hclge_get_vport(handle); | |
6394 | ||
6395 | vport->rxvlan_cfg.strip_tag1_en = false; | |
6396 | vport->rxvlan_cfg.strip_tag2_en = enable; | |
6397 | vport->rxvlan_cfg.vlan1_vlan_prionly = false; | |
6398 | vport->rxvlan_cfg.vlan2_vlan_prionly = false; | |
6399 | ||
6400 | return hclge_set_vlan_rx_offload_cfg(vport); | |
6401 | } | |
6402 | ||
4ee09281 | 6403 | static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mps) |
46a3df9f | 6404 | { |
d44f9b63 | 6405 | struct hclge_config_max_frm_size_cmd *req; |
46a3df9f | 6406 | struct hclge_desc desc; |
46a3df9f | 6407 | |
46a3df9f S |
6408 | hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAX_FRM_SIZE, false); |
6409 | ||
d44f9b63 | 6410 | req = (struct hclge_config_max_frm_size_cmd *)desc.data; |
4ee09281 | 6411 | req->max_frm_size = cpu_to_le16(new_mps); |
b86fdbf3 | 6412 | req->min_frm_size = HCLGE_MAC_MIN_FRAME; |
46a3df9f | 6413 | |
4ee09281 | 6414 | return hclge_cmd_send(&hdev->hw, &desc, 1); |
46a3df9f S |
6415 | } |
6416 | ||
12341881 FL |
6417 | static int hclge_set_mtu(struct hnae3_handle *handle, int new_mtu) |
6418 | { | |
6419 | struct hclge_vport *vport = hclge_get_vport(handle); | |
b2c04029 YL |
6420 | |
6421 | return hclge_set_vport_mtu(vport, new_mtu); | |
6422 | } | |
6423 | ||
6424 | int hclge_set_vport_mtu(struct hclge_vport *vport, int new_mtu) | |
6425 | { | |
12341881 | 6426 | struct hclge_dev *hdev = vport->back; |
b2c04029 | 6427 | int i, max_frm_size, ret = 0; |
12341881 | 6428 | |
4ee09281 YL |
6429 | max_frm_size = new_mtu + ETH_HLEN + ETH_FCS_LEN + 2 * VLAN_HLEN; |
6430 | if (max_frm_size < HCLGE_MAC_MIN_FRAME || | |
6431 | max_frm_size > HCLGE_MAC_MAX_FRAME) | |
6432 | return -EINVAL; | |
6433 | ||
b2c04029 YL |
6434 | max_frm_size = max(max_frm_size, HCLGE_MAC_DEFAULT_FRAME); |
6435 | mutex_lock(&hdev->vport_lock); | |
6436 | /* VF's mps must fit within hdev->mps */ | |
6437 | if (vport->vport_id && max_frm_size > hdev->mps) { | |
6438 | mutex_unlock(&hdev->vport_lock); | |
6439 | return -EINVAL; | |
6440 | } else if (vport->vport_id) { | |
6441 | vport->mps = max_frm_size; | |
6442 | mutex_unlock(&hdev->vport_lock); | |
6443 | return 0; | |
6444 | } | |
6445 | ||
6446 | /* PF's mps must be greater then VF's mps */ | |
6447 | for (i = 1; i < hdev->num_alloc_vport; i++) | |
6448 | if (max_frm_size < hdev->vport[i].mps) { | |
6449 | mutex_unlock(&hdev->vport_lock); | |
6450 | return -EINVAL; | |
6451 | } | |
6452 | ||
4ee09281 | 6453 | ret = hclge_set_mac_mtu(hdev, max_frm_size); |
12341881 FL |
6454 | if (ret) { |
6455 | dev_err(&hdev->pdev->dev, | |
6456 | "Change mtu fail, ret =%d\n", ret); | |
b2c04029 | 6457 | goto out; |
12341881 FL |
6458 | } |
6459 | ||
4ee09281 | 6460 | hdev->mps = max_frm_size; |
b2c04029 | 6461 | vport->mps = max_frm_size; |
4ee09281 | 6462 | |
12341881 FL |
6463 | ret = hclge_buffer_alloc(hdev); |
6464 | if (ret) | |
6465 | dev_err(&hdev->pdev->dev, | |
6466 | "Allocate buffer fail, ret =%d\n", ret); | |
6467 | ||
b2c04029 YL |
6468 | out: |
6469 | mutex_unlock(&hdev->vport_lock); | |
12341881 FL |
6470 | return ret; |
6471 | } | |
6472 | ||
46a3df9f S |
6473 | static int hclge_send_reset_tqp_cmd(struct hclge_dev *hdev, u16 queue_id, |
6474 | bool enable) | |
6475 | { | |
d44f9b63 | 6476 | struct hclge_reset_tqp_queue_cmd *req; |
46a3df9f S |
6477 | struct hclge_desc desc; |
6478 | int ret; | |
6479 | ||
6480 | hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RESET_TQP_QUEUE, false); | |
6481 | ||
d44f9b63 | 6482 | req = (struct hclge_reset_tqp_queue_cmd *)desc.data; |
46a3df9f | 6483 | req->tqp_id = cpu_to_le16(queue_id & HCLGE_RING_ID_MASK); |
ccc23ef3 | 6484 | hnae3_set_bit(req->reset_req, HCLGE_TQP_RESET_B, enable); |
46a3df9f S |
6485 | |
6486 | ret = hclge_cmd_send(&hdev->hw, &desc, 1); | |
6487 | if (ret) { | |
6488 | dev_err(&hdev->pdev->dev, | |
6489 | "Send tqp reset cmd error, status =%d\n", ret); | |
6490 | return ret; | |
6491 | } | |
6492 | ||
6493 | return 0; | |
6494 | } | |
6495 | ||
6496 | static int hclge_get_reset_status(struct hclge_dev *hdev, u16 queue_id) | |
6497 | { | |
d44f9b63 | 6498 | struct hclge_reset_tqp_queue_cmd *req; |
46a3df9f S |
6499 | struct hclge_desc desc; |
6500 | int ret; | |
6501 | ||
6502 | hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RESET_TQP_QUEUE, true); | |
6503 | ||
d44f9b63 | 6504 | req = (struct hclge_reset_tqp_queue_cmd *)desc.data; |
46a3df9f S |
6505 | req->tqp_id = cpu_to_le16(queue_id & HCLGE_RING_ID_MASK); |
6506 | ||
6507 | ret = hclge_cmd_send(&hdev->hw, &desc, 1); | |
6508 | if (ret) { | |
6509 | dev_err(&hdev->pdev->dev, | |
6510 | "Get reset status error, status =%d\n", ret); | |
6511 | return ret; | |
6512 | } | |
6513 | ||
ccc23ef3 | 6514 | return hnae3_get_bit(req->ready_to_reset, HCLGE_TQP_RESET_B); |
46a3df9f S |
6515 | } |
6516 | ||
e5e89cda PL |
6517 | static u16 hclge_covert_handle_qid_global(struct hnae3_handle *handle, |
6518 | u16 queue_id) | |
6519 | { | |
6520 | struct hnae3_queue *queue; | |
6521 | struct hclge_tqp *tqp; | |
6522 | ||
6523 | queue = handle->kinfo.tqp[queue_id]; | |
6524 | tqp = container_of(queue, struct hclge_tqp, q); | |
6525 | ||
6526 | return tqp->index; | |
6527 | } | |
6528 | ||
abe62a63 | 6529 | int hclge_reset_tqp(struct hnae3_handle *handle, u16 queue_id) |
46a3df9f S |
6530 | { |
6531 | struct hclge_vport *vport = hclge_get_vport(handle); | |
6532 | struct hclge_dev *hdev = vport->back; | |
6533 | int reset_try_times = 0; | |
6534 | int reset_status; | |
e5e89cda | 6535 | u16 queue_gid; |
abe62a63 | 6536 | int ret = 0; |
46a3df9f | 6537 | |
e5e89cda PL |
6538 | queue_gid = hclge_covert_handle_qid_global(handle, queue_id); |
6539 | ||
46a3df9f S |
6540 | ret = hclge_tqp_enable(hdev, queue_id, 0, false); |
6541 | if (ret) { | |
abe62a63 HT |
6542 | dev_err(&hdev->pdev->dev, "Disable tqp fail, ret = %d\n", ret); |
6543 | return ret; | |
46a3df9f S |
6544 | } |
6545 | ||
e5e89cda | 6546 | ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, true); |
46a3df9f | 6547 | if (ret) { |
abe62a63 HT |
6548 | dev_err(&hdev->pdev->dev, |
6549 | "Send reset tqp cmd fail, ret = %d\n", ret); | |
6550 | return ret; | |
46a3df9f S |
6551 | } |
6552 | ||
6553 | reset_try_times = 0; | |
6554 | while (reset_try_times++ < HCLGE_TQP_RESET_TRY_TIMES) { | |
6555 | /* Wait for tqp hw reset */ | |
6556 | msleep(20); | |
e5e89cda | 6557 | reset_status = hclge_get_reset_status(hdev, queue_gid); |
46a3df9f S |
6558 | if (reset_status) |
6559 | break; | |
6560 | } | |
6561 | ||
6562 | if (reset_try_times >= HCLGE_TQP_RESET_TRY_TIMES) { | |
abe62a63 HT |
6563 | dev_err(&hdev->pdev->dev, "Reset TQP fail\n"); |
6564 | return ret; | |
46a3df9f S |
6565 | } |
6566 | ||
e5e89cda | 6567 | ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, false); |
abe62a63 HT |
6568 | if (ret) |
6569 | dev_err(&hdev->pdev->dev, | |
6570 | "Deassert the soft reset fail, ret = %d\n", ret); | |
6571 | ||
6572 | return ret; | |
46a3df9f S |
6573 | } |
6574 | ||
d3ea7fc4 PL |
6575 | void hclge_reset_vf_queue(struct hclge_vport *vport, u16 queue_id) |
6576 | { | |
6577 | struct hclge_dev *hdev = vport->back; | |
6578 | int reset_try_times = 0; | |
6579 | int reset_status; | |
6580 | u16 queue_gid; | |
6581 | int ret; | |
6582 | ||
6583 | queue_gid = hclge_covert_handle_qid_global(&vport->nic, queue_id); | |
6584 | ||
6585 | ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, true); | |
6586 | if (ret) { | |
6587 | dev_warn(&hdev->pdev->dev, | |
6588 | "Send reset tqp cmd fail, ret = %d\n", ret); | |
6589 | return; | |
6590 | } | |
6591 | ||
6592 | reset_try_times = 0; | |
6593 | while (reset_try_times++ < HCLGE_TQP_RESET_TRY_TIMES) { | |
6594 | /* Wait for tqp hw reset */ | |
6595 | msleep(20); | |
6596 | reset_status = hclge_get_reset_status(hdev, queue_gid); | |
6597 | if (reset_status) | |
6598 | break; | |
6599 | } | |
6600 | ||
6601 | if (reset_try_times >= HCLGE_TQP_RESET_TRY_TIMES) { | |
6602 | dev_warn(&hdev->pdev->dev, "Reset TQP fail\n"); | |
6603 | return; | |
6604 | } | |
6605 | ||
6606 | ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, false); | |
6607 | if (ret) | |
6608 | dev_warn(&hdev->pdev->dev, | |
6609 | "Deassert the soft reset fail, ret = %d\n", ret); | |
6610 | } | |
6611 | ||
46a3df9f S |
6612 | static u32 hclge_get_fw_version(struct hnae3_handle *handle) |
6613 | { | |
6614 | struct hclge_vport *vport = hclge_get_vport(handle); | |
6615 | struct hclge_dev *hdev = vport->back; | |
6616 | ||
6617 | return hdev->fw_version; | |
6618 | } | |
6619 | ||
09ea401e PL |
6620 | static void hclge_set_flowctrl_adv(struct hclge_dev *hdev, u32 rx_en, u32 tx_en) |
6621 | { | |
6622 | struct phy_device *phydev = hdev->hw.mac.phydev; | |
6623 | ||
6624 | if (!phydev) | |
6625 | return; | |
6626 | ||
6627 | phydev->advertising &= ~(ADVERTISED_Pause | ADVERTISED_Asym_Pause); | |
6628 | ||
6629 | if (rx_en) | |
6630 | phydev->advertising |= ADVERTISED_Pause | ADVERTISED_Asym_Pause; | |
6631 | ||
6632 | if (tx_en) | |
6633 | phydev->advertising ^= ADVERTISED_Asym_Pause; | |
6634 | } | |
6635 | ||
6636 | static int hclge_cfg_pauseparam(struct hclge_dev *hdev, u32 rx_en, u32 tx_en) | |
6637 | { | |
09ea401e PL |
6638 | int ret; |
6639 | ||
6640 | if (rx_en && tx_en) | |
7a28a82a | 6641 | hdev->fc_mode_last_time = HCLGE_FC_FULL; |
09ea401e | 6642 | else if (rx_en && !tx_en) |
7a28a82a | 6643 | hdev->fc_mode_last_time = HCLGE_FC_RX_PAUSE; |
09ea401e | 6644 | else if (!rx_en && tx_en) |
7a28a82a | 6645 | hdev->fc_mode_last_time = HCLGE_FC_TX_PAUSE; |
09ea401e | 6646 | else |
7a28a82a | 6647 | hdev->fc_mode_last_time = HCLGE_FC_NONE; |
09ea401e | 6648 | |
7a28a82a | 6649 | if (hdev->tm_info.fc_mode == HCLGE_FC_PFC) |
09ea401e | 6650 | return 0; |
09ea401e PL |
6651 | |
6652 | ret = hclge_mac_pause_en_cfg(hdev, tx_en, rx_en); | |
6653 | if (ret) { | |
6654 | dev_err(&hdev->pdev->dev, "configure pauseparam error, ret = %d.\n", | |
6655 | ret); | |
6656 | return ret; | |
6657 | } | |
6658 | ||
7a28a82a | 6659 | hdev->tm_info.fc_mode = hdev->fc_mode_last_time; |
09ea401e PL |
6660 | |
6661 | return 0; | |
6662 | } | |
6663 | ||
6282f2ea PL |
6664 | int hclge_cfg_flowctrl(struct hclge_dev *hdev) |
6665 | { | |
6666 | struct phy_device *phydev = hdev->hw.mac.phydev; | |
6667 | u16 remote_advertising = 0; | |
6668 | u16 local_advertising = 0; | |
6669 | u32 rx_pause, tx_pause; | |
6670 | u8 flowctl; | |
6671 | ||
6672 | if (!phydev->link || !phydev->autoneg) | |
6673 | return 0; | |
6674 | ||
6675 | if (phydev->advertising & ADVERTISED_Pause) | |
6676 | local_advertising = ADVERTISE_PAUSE_CAP; | |
6677 | ||
6678 | if (phydev->advertising & ADVERTISED_Asym_Pause) | |
6679 | local_advertising |= ADVERTISE_PAUSE_ASYM; | |
6680 | ||
6681 | if (phydev->pause) | |
6682 | remote_advertising = LPA_PAUSE_CAP; | |
6683 | ||
6684 | if (phydev->asym_pause) | |
6685 | remote_advertising |= LPA_PAUSE_ASYM; | |
6686 | ||
6687 | flowctl = mii_resolve_flowctrl_fdx(local_advertising, | |
6688 | remote_advertising); | |
6689 | tx_pause = flowctl & FLOW_CTRL_TX; | |
6690 | rx_pause = flowctl & FLOW_CTRL_RX; | |
6691 | ||
6692 | if (phydev->duplex == HCLGE_MAC_HALF) { | |
6693 | tx_pause = 0; | |
6694 | rx_pause = 0; | |
6695 | } | |
6696 | ||
6697 | return hclge_cfg_pauseparam(hdev, rx_pause, tx_pause); | |
6698 | } | |
6699 | ||
46a3df9f S |
6700 | static void hclge_get_pauseparam(struct hnae3_handle *handle, u32 *auto_neg, |
6701 | u32 *rx_en, u32 *tx_en) | |
6702 | { | |
6703 | struct hclge_vport *vport = hclge_get_vport(handle); | |
6704 | struct hclge_dev *hdev = vport->back; | |
6705 | ||
6706 | *auto_neg = hclge_get_autoneg(handle); | |
6707 | ||
6708 | if (hdev->tm_info.fc_mode == HCLGE_FC_PFC) { | |
6709 | *rx_en = 0; | |
6710 | *tx_en = 0; | |
6711 | return; | |
6712 | } | |
6713 | ||
6714 | if (hdev->tm_info.fc_mode == HCLGE_FC_RX_PAUSE) { | |
6715 | *rx_en = 1; | |
6716 | *tx_en = 0; | |
6717 | } else if (hdev->tm_info.fc_mode == HCLGE_FC_TX_PAUSE) { | |
6718 | *tx_en = 1; | |
6719 | *rx_en = 0; | |
6720 | } else if (hdev->tm_info.fc_mode == HCLGE_FC_FULL) { | |
6721 | *rx_en = 1; | |
6722 | *tx_en = 1; | |
6723 | } else { | |
6724 | *rx_en = 0; | |
6725 | *tx_en = 0; | |
6726 | } | |
6727 | } | |
6728 | ||
09ea401e PL |
6729 | static int hclge_set_pauseparam(struct hnae3_handle *handle, u32 auto_neg, |
6730 | u32 rx_en, u32 tx_en) | |
6731 | { | |
6732 | struct hclge_vport *vport = hclge_get_vport(handle); | |
6733 | struct hclge_dev *hdev = vport->back; | |
6734 | struct phy_device *phydev = hdev->hw.mac.phydev; | |
6735 | u32 fc_autoneg; | |
6736 | ||
09ea401e PL |
6737 | fc_autoneg = hclge_get_autoneg(handle); |
6738 | if (auto_neg != fc_autoneg) { | |
6739 | dev_info(&hdev->pdev->dev, | |
6740 | "To change autoneg please use: ethtool -s <dev> autoneg <on|off>\n"); | |
6741 | return -EOPNOTSUPP; | |
6742 | } | |
6743 | ||
6744 | if (hdev->tm_info.fc_mode == HCLGE_FC_PFC) { | |
6745 | dev_info(&hdev->pdev->dev, | |
6746 | "Priority flow control enabled. Cannot set link flow control.\n"); | |
6747 | return -EOPNOTSUPP; | |
6748 | } | |
6749 | ||
6750 | hclge_set_flowctrl_adv(hdev, rx_en, tx_en); | |
6751 | ||
6752 | if (!fc_autoneg) | |
6753 | return hclge_cfg_pauseparam(hdev, rx_en, tx_en); | |
6754 | ||
bef24782 FL |
6755 | /* Only support flow control negotiation for netdev with |
6756 | * phy attached for now. | |
6757 | */ | |
6758 | if (!phydev) | |
6759 | return -EOPNOTSUPP; | |
6760 | ||
09ea401e PL |
6761 | return phy_start_aneg(phydev); |
6762 | } | |
6763 | ||
46a3df9f S |
6764 | static void hclge_get_ksettings_an_result(struct hnae3_handle *handle, |
6765 | u8 *auto_neg, u32 *speed, u8 *duplex) | |
6766 | { | |
6767 | struct hclge_vport *vport = hclge_get_vport(handle); | |
6768 | struct hclge_dev *hdev = vport->back; | |
6769 | ||
6770 | if (speed) | |
6771 | *speed = hdev->hw.mac.speed; | |
6772 | if (duplex) | |
6773 | *duplex = hdev->hw.mac.duplex; | |
6774 | if (auto_neg) | |
6775 | *auto_neg = hdev->hw.mac.autoneg; | |
6776 | } | |
6777 | ||
6778 | static void hclge_get_media_type(struct hnae3_handle *handle, u8 *media_type) | |
6779 | { | |
6780 | struct hclge_vport *vport = hclge_get_vport(handle); | |
6781 | struct hclge_dev *hdev = vport->back; | |
6782 | ||
6783 | if (media_type) | |
6784 | *media_type = hdev->hw.mac.media_type; | |
6785 | } | |
6786 | ||
6787 | static void hclge_get_mdix_mode(struct hnae3_handle *handle, | |
6788 | u8 *tp_mdix_ctrl, u8 *tp_mdix) | |
6789 | { | |
6790 | struct hclge_vport *vport = hclge_get_vport(handle); | |
6791 | struct hclge_dev *hdev = vport->back; | |
6792 | struct phy_device *phydev = hdev->hw.mac.phydev; | |
6793 | int mdix_ctrl, mdix, retval, is_resolved; | |
6794 | ||
6795 | if (!phydev) { | |
6796 | *tp_mdix_ctrl = ETH_TP_MDI_INVALID; | |
6797 | *tp_mdix = ETH_TP_MDI_INVALID; | |
6798 | return; | |
6799 | } | |
6800 | ||
6801 | phy_write(phydev, HCLGE_PHY_PAGE_REG, HCLGE_PHY_PAGE_MDIX); | |
6802 | ||
6803 | retval = phy_read(phydev, HCLGE_PHY_CSC_REG); | |
ccc23ef3 PL |
6804 | mdix_ctrl = hnae3_get_field(retval, HCLGE_PHY_MDIX_CTRL_M, |
6805 | HCLGE_PHY_MDIX_CTRL_S); | |
46a3df9f S |
6806 | |
6807 | retval = phy_read(phydev, HCLGE_PHY_CSS_REG); | |
ccc23ef3 PL |
6808 | mdix = hnae3_get_bit(retval, HCLGE_PHY_MDIX_STATUS_B); |
6809 | is_resolved = hnae3_get_bit(retval, HCLGE_PHY_SPEED_DUP_RESOLVE_B); | |
46a3df9f S |
6810 | |
6811 | phy_write(phydev, HCLGE_PHY_PAGE_REG, HCLGE_PHY_PAGE_COPPER); | |
6812 | ||
6813 | switch (mdix_ctrl) { | |
6814 | case 0x0: | |
6815 | *tp_mdix_ctrl = ETH_TP_MDI; | |
6816 | break; | |
6817 | case 0x1: | |
6818 | *tp_mdix_ctrl = ETH_TP_MDI_X; | |
6819 | break; | |
6820 | case 0x3: | |
6821 | *tp_mdix_ctrl = ETH_TP_MDI_AUTO; | |
6822 | break; | |
6823 | default: | |
6824 | *tp_mdix_ctrl = ETH_TP_MDI_INVALID; | |
6825 | break; | |
6826 | } | |
6827 | ||
6828 | if (!is_resolved) | |
6829 | *tp_mdix = ETH_TP_MDI_INVALID; | |
6830 | else if (mdix) | |
6831 | *tp_mdix = ETH_TP_MDI_X; | |
6832 | else | |
6833 | *tp_mdix = ETH_TP_MDI; | |
6834 | } | |
6835 | ||
dda6b7d5 FL |
6836 | static int hclge_init_instance_hw(struct hclge_dev *hdev) |
6837 | { | |
6838 | return hclge_mac_connect_phy(hdev); | |
6839 | } | |
6840 | ||
6841 | static void hclge_uninit_instance_hw(struct hclge_dev *hdev) | |
6842 | { | |
6843 | hclge_mac_disconnect_phy(hdev); | |
6844 | } | |
6845 | ||
46a3df9f S |
6846 | static int hclge_init_client_instance(struct hnae3_client *client, |
6847 | struct hnae3_ae_dev *ae_dev) | |
6848 | { | |
6849 | struct hclge_dev *hdev = ae_dev->priv; | |
6850 | struct hclge_vport *vport; | |
6851 | int i, ret; | |
6852 | ||
6853 | for (i = 0; i < hdev->num_vmdq_vport + 1; i++) { | |
6854 | vport = &hdev->vport[i]; | |
6855 | ||
6856 | switch (client->type) { | |
6857 | case HNAE3_CLIENT_KNIC: | |
6858 | ||
6859 | hdev->nic_client = client; | |
6860 | vport->nic.client = client; | |
6861 | ret = client->ops->init_instance(&vport->nic); | |
6862 | if (ret) | |
2f59de78 | 6863 | goto clear_nic; |
46a3df9f | 6864 | |
dda6b7d5 FL |
6865 | ret = hclge_init_instance_hw(hdev); |
6866 | if (ret) { | |
6867 | client->ops->uninit_instance(&vport->nic, | |
6868 | 0); | |
2f59de78 | 6869 | goto clear_nic; |
dda6b7d5 FL |
6870 | } |
6871 | ||
8ed41eeb JS |
6872 | hnae3_set_client_init_flag(client, ae_dev, 1); |
6873 | ||
46a3df9f | 6874 | if (hdev->roce_client && |
e92a0843 | 6875 | hnae3_dev_roce_supported(hdev)) { |
46a3df9f S |
6876 | struct hnae3_client *rc = hdev->roce_client; |
6877 | ||
6878 | ret = hclge_init_roce_base_info(vport); | |
6879 | if (ret) | |
2f59de78 | 6880 | goto clear_roce; |
46a3df9f S |
6881 | |
6882 | ret = rc->ops->init_instance(&vport->roce); | |
6883 | if (ret) | |
2f59de78 | 6884 | goto clear_roce; |
8ed41eeb JS |
6885 | |
6886 | hnae3_set_client_init_flag(hdev->roce_client, | |
6887 | ae_dev, 1); | |
46a3df9f S |
6888 | } |
6889 | ||
6890 | break; | |
6891 | case HNAE3_CLIENT_UNIC: | |
6892 | hdev->nic_client = client; | |
6893 | vport->nic.client = client; | |
6894 | ||
6895 | ret = client->ops->init_instance(&vport->nic); | |
6896 | if (ret) | |
2f59de78 | 6897 | goto clear_nic; |
46a3df9f | 6898 | |
8ed41eeb JS |
6899 | hnae3_set_client_init_flag(client, ae_dev, 1); |
6900 | ||
46a3df9f S |
6901 | break; |
6902 | case HNAE3_CLIENT_ROCE: | |
e92a0843 | 6903 | if (hnae3_dev_roce_supported(hdev)) { |
46a3df9f S |
6904 | hdev->roce_client = client; |
6905 | vport->roce.client = client; | |
6906 | } | |
6907 | ||
3a46f34d | 6908 | if (hdev->roce_client && hdev->nic_client) { |
46a3df9f S |
6909 | ret = hclge_init_roce_base_info(vport); |
6910 | if (ret) | |
2f59de78 | 6911 | goto clear_roce; |
46a3df9f S |
6912 | |
6913 | ret = client->ops->init_instance(&vport->roce); | |
6914 | if (ret) | |
2f59de78 | 6915 | goto clear_roce; |
8ed41eeb JS |
6916 | |
6917 | hnae3_set_client_init_flag(client, ae_dev, 1); | |
46a3df9f | 6918 | } |
085920ba JS |
6919 | |
6920 | break; | |
6921 | default: | |
6922 | return -EINVAL; | |
46a3df9f S |
6923 | } |
6924 | } | |
6925 | ||
6926 | return 0; | |
2f59de78 JS |
6927 | |
6928 | clear_nic: | |
6929 | hdev->nic_client = NULL; | |
6930 | vport->nic.client = NULL; | |
6931 | return ret; | |
6932 | clear_roce: | |
6933 | hdev->roce_client = NULL; | |
6934 | vport->roce.client = NULL; | |
6935 | return ret; | |
46a3df9f S |
6936 | } |
6937 | ||
6938 | static void hclge_uninit_client_instance(struct hnae3_client *client, | |
6939 | struct hnae3_ae_dev *ae_dev) | |
6940 | { | |
6941 | struct hclge_dev *hdev = ae_dev->priv; | |
6942 | struct hclge_vport *vport; | |
6943 | int i; | |
6944 | ||
6945 | for (i = 0; i < hdev->num_vmdq_vport + 1; i++) { | |
6946 | vport = &hdev->vport[i]; | |
a17dcf3f | 6947 | if (hdev->roce_client) { |
46a3df9f S |
6948 | hdev->roce_client->ops->uninit_instance(&vport->roce, |
6949 | 0); | |
a17dcf3f L |
6950 | hdev->roce_client = NULL; |
6951 | vport->roce.client = NULL; | |
6952 | } | |
46a3df9f S |
6953 | if (client->type == HNAE3_CLIENT_ROCE) |
6954 | return; | |
2f59de78 | 6955 | if (hdev->nic_client && client->ops->uninit_instance) { |
dda6b7d5 | 6956 | hclge_uninit_instance_hw(hdev); |
46a3df9f | 6957 | client->ops->uninit_instance(&vport->nic, 0); |
a17dcf3f L |
6958 | hdev->nic_client = NULL; |
6959 | vport->nic.client = NULL; | |
6960 | } | |
46a3df9f S |
6961 | } |
6962 | } | |
6963 | ||
6964 | static int hclge_pci_init(struct hclge_dev *hdev) | |
6965 | { | |
6966 | struct pci_dev *pdev = hdev->pdev; | |
6967 | struct hclge_hw *hw; | |
6968 | int ret; | |
6969 | ||
6970 | ret = pci_enable_device(pdev); | |
6971 | if (ret) { | |
6972 | dev_err(&pdev->dev, "failed to enable PCI device\n"); | |
6c46284e | 6973 | return ret; |
46a3df9f S |
6974 | } |
6975 | ||
6976 | ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); | |
6977 | if (ret) { | |
6978 | ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)); | |
6979 | if (ret) { | |
6980 | dev_err(&pdev->dev, | |
6981 | "can't set consistent PCI DMA"); | |
6982 | goto err_disable_device; | |
6983 | } | |
6984 | dev_warn(&pdev->dev, "set DMA mask to 32 bits\n"); | |
6985 | } | |
6986 | ||
6987 | ret = pci_request_regions(pdev, HCLGE_DRIVER_NAME); | |
6988 | if (ret) { | |
6989 | dev_err(&pdev->dev, "PCI request regions failed %d\n", ret); | |
6990 | goto err_disable_device; | |
6991 | } | |
6992 | ||
6993 | pci_set_master(pdev); | |
6994 | hw = &hdev->hw; | |
46a3df9f S |
6995 | hw->io_base = pcim_iomap(pdev, 2, 0); |
6996 | if (!hw->io_base) { | |
6997 | dev_err(&pdev->dev, "Can't map configuration register space\n"); | |
6998 | ret = -ENOMEM; | |
6999 | goto err_clr_master; | |
7000 | } | |
7001 | ||
709eb41a L |
7002 | hdev->num_req_vfs = pci_sriov_get_totalvfs(pdev); |
7003 | ||
46a3df9f S |
7004 | return 0; |
7005 | err_clr_master: | |
7006 | pci_clear_master(pdev); | |
7007 | pci_release_regions(pdev); | |
7008 | err_disable_device: | |
7009 | pci_disable_device(pdev); | |
46a3df9f S |
7010 | |
7011 | return ret; | |
7012 | } | |
7013 | ||
7014 | static void hclge_pci_uninit(struct hclge_dev *hdev) | |
7015 | { | |
7016 | struct pci_dev *pdev = hdev->pdev; | |
7017 | ||
7d6d639b | 7018 | pcim_iounmap(pdev, hdev->hw.io_base); |
887c3820 | 7019 | pci_free_irq_vectors(pdev); |
46a3df9f S |
7020 | pci_clear_master(pdev); |
7021 | pci_release_mem_regions(pdev); | |
7022 | pci_disable_device(pdev); | |
7023 | } | |
7024 | ||
2ec3d9f0 PL |
7025 | static void hclge_state_init(struct hclge_dev *hdev) |
7026 | { | |
7027 | set_bit(HCLGE_STATE_SERVICE_INITED, &hdev->state); | |
7028 | set_bit(HCLGE_STATE_DOWN, &hdev->state); | |
7029 | clear_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state); | |
7030 | clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state); | |
7031 | clear_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state); | |
7032 | clear_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state); | |
7033 | } | |
7034 | ||
7035 | static void hclge_state_uninit(struct hclge_dev *hdev) | |
7036 | { | |
7037 | set_bit(HCLGE_STATE_DOWN, &hdev->state); | |
7038 | ||
7039 | if (hdev->service_timer.function) | |
7040 | del_timer_sync(&hdev->service_timer); | |
1afdb53a HT |
7041 | if (hdev->reset_timer.function) |
7042 | del_timer_sync(&hdev->reset_timer); | |
2ec3d9f0 PL |
7043 | if (hdev->service_task.func) |
7044 | cancel_work_sync(&hdev->service_task); | |
7045 | if (hdev->rst_service_task.func) | |
7046 | cancel_work_sync(&hdev->rst_service_task); | |
7047 | if (hdev->mbx_service_task.func) | |
7048 | cancel_work_sync(&hdev->mbx_service_task); | |
7049 | } | |
7050 | ||
26977990 HT |
7051 | static void hclge_flr_prepare(struct hnae3_ae_dev *ae_dev) |
7052 | { | |
7053 | #define HCLGE_FLR_WAIT_MS 100 | |
7054 | #define HCLGE_FLR_WAIT_CNT 50 | |
7055 | struct hclge_dev *hdev = ae_dev->priv; | |
7056 | int cnt = 0; | |
7057 | ||
7058 | clear_bit(HNAE3_FLR_DOWN, &hdev->flr_state); | |
7059 | clear_bit(HNAE3_FLR_DONE, &hdev->flr_state); | |
7060 | set_bit(HNAE3_FLR_RESET, &hdev->default_reset_request); | |
7061 | hclge_reset_event(hdev->pdev, NULL); | |
7062 | ||
7063 | while (!test_bit(HNAE3_FLR_DOWN, &hdev->flr_state) && | |
7064 | cnt++ < HCLGE_FLR_WAIT_CNT) | |
7065 | msleep(HCLGE_FLR_WAIT_MS); | |
7066 | ||
7067 | if (!test_bit(HNAE3_FLR_DOWN, &hdev->flr_state)) | |
7068 | dev_err(&hdev->pdev->dev, | |
7069 | "flr wait down timeout: %d\n", cnt); | |
7070 | } | |
7071 | ||
7072 | static void hclge_flr_done(struct hnae3_ae_dev *ae_dev) | |
7073 | { | |
7074 | struct hclge_dev *hdev = ae_dev->priv; | |
7075 | ||
7076 | set_bit(HNAE3_FLR_DONE, &hdev->flr_state); | |
7077 | } | |
7078 | ||
46a3df9f S |
7079 | static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev) |
7080 | { | |
7081 | struct pci_dev *pdev = ae_dev->pdev; | |
46a3df9f S |
7082 | struct hclge_dev *hdev; |
7083 | int ret; | |
7084 | ||
7085 | hdev = devm_kzalloc(&pdev->dev, sizeof(*hdev), GFP_KERNEL); | |
7086 | if (!hdev) { | |
7087 | ret = -ENOMEM; | |
e0027501 | 7088 | goto out; |
46a3df9f S |
7089 | } |
7090 | ||
46a3df9f S |
7091 | hdev->pdev = pdev; |
7092 | hdev->ae_dev = ae_dev; | |
4ed340ab | 7093 | hdev->reset_type = HNAE3_NONE_RESET; |
1a2f7bf2 | 7094 | hdev->reset_level = HNAE3_FUNC_RESET; |
46a3df9f | 7095 | ae_dev->priv = hdev; |
4ee09281 | 7096 | hdev->mps = ETH_FRAME_LEN + ETH_FCS_LEN + 2 * VLAN_HLEN; |
46a3df9f | 7097 | |
b2c04029 YL |
7098 | mutex_init(&hdev->vport_lock); |
7099 | ||
46a3df9f S |
7100 | ret = hclge_pci_init(hdev); |
7101 | if (ret) { | |
7102 | dev_err(&pdev->dev, "PCI init failed\n"); | |
e0027501 | 7103 | goto out; |
46a3df9f S |
7104 | } |
7105 | ||
3efb960f L |
7106 | /* Firmware command queue initialize */ |
7107 | ret = hclge_cmd_queue_init(hdev); | |
7108 | if (ret) { | |
7109 | dev_err(&pdev->dev, "Cmd queue init failed, ret = %d.\n", ret); | |
e0027501 | 7110 | goto err_pci_uninit; |
3efb960f L |
7111 | } |
7112 | ||
7113 | /* Firmware command initialize */ | |
46a3df9f S |
7114 | ret = hclge_cmd_init(hdev); |
7115 | if (ret) | |
e0027501 | 7116 | goto err_cmd_uninit; |
46a3df9f S |
7117 | |
7118 | ret = hclge_get_cap(hdev); | |
7119 | if (ret) { | |
e00e2197 CIK |
7120 | dev_err(&pdev->dev, "get hw capability error, ret = %d.\n", |
7121 | ret); | |
e0027501 | 7122 | goto err_cmd_uninit; |
46a3df9f S |
7123 | } |
7124 | ||
7125 | ret = hclge_configure(hdev); | |
7126 | if (ret) { | |
7127 | dev_err(&pdev->dev, "Configure dev error, ret = %d.\n", ret); | |
e0027501 | 7128 | goto err_cmd_uninit; |
46a3df9f S |
7129 | } |
7130 | ||
887c3820 | 7131 | ret = hclge_init_msi(hdev); |
46a3df9f | 7132 | if (ret) { |
887c3820 | 7133 | dev_err(&pdev->dev, "Init MSI/MSI-X error, ret = %d.\n", ret); |
e0027501 | 7134 | goto err_cmd_uninit; |
46a3df9f S |
7135 | } |
7136 | ||
466b0c00 L |
7137 | ret = hclge_misc_irq_init(hdev); |
7138 | if (ret) { | |
7139 | dev_err(&pdev->dev, | |
7140 | "Misc IRQ(vector0) init error, ret = %d.\n", | |
7141 | ret); | |
e0027501 | 7142 | goto err_msi_uninit; |
466b0c00 L |
7143 | } |
7144 | ||
46a3df9f S |
7145 | ret = hclge_alloc_tqps(hdev); |
7146 | if (ret) { | |
7147 | dev_err(&pdev->dev, "Allocate TQPs error, ret = %d.\n", ret); | |
e0027501 | 7148 | goto err_msi_irq_uninit; |
46a3df9f S |
7149 | } |
7150 | ||
7151 | ret = hclge_alloc_vport(hdev); | |
7152 | if (ret) { | |
7153 | dev_err(&pdev->dev, "Allocate vport error, ret = %d.\n", ret); | |
e0027501 | 7154 | goto err_msi_irq_uninit; |
46a3df9f S |
7155 | } |
7156 | ||
7df7dad6 L |
7157 | ret = hclge_map_tqp(hdev); |
7158 | if (ret) { | |
7159 | dev_err(&pdev->dev, "Map tqp error, ret = %d.\n", ret); | |
bc59f827 | 7160 | goto err_msi_irq_uninit; |
7df7dad6 L |
7161 | } |
7162 | ||
dea9a821 HT |
7163 | if (hdev->hw.mac.media_type == HNAE3_MEDIA_TYPE_COPPER) { |
7164 | ret = hclge_mac_mdio_config(hdev); | |
7165 | if (ret) { | |
7166 | dev_err(&hdev->pdev->dev, | |
7167 | "mdio config fail ret=%d\n", ret); | |
bc59f827 | 7168 | goto err_msi_irq_uninit; |
dea9a821 | 7169 | } |
cf9cca2d | 7170 | } |
7171 | ||
2da5ec58 JS |
7172 | ret = hclge_init_umv_space(hdev); |
7173 | if (ret) { | |
7174 | dev_err(&pdev->dev, "umv space init error, ret=%d.\n", ret); | |
7175 | goto err_msi_irq_uninit; | |
7176 | } | |
7177 | ||
46a3df9f S |
7178 | ret = hclge_mac_init(hdev); |
7179 | if (ret) { | |
7180 | dev_err(&pdev->dev, "Mac init error, ret = %d\n", ret); | |
e0027501 | 7181 | goto err_mdiobus_unreg; |
46a3df9f | 7182 | } |
46a3df9f S |
7183 | |
7184 | ret = hclge_config_tso(hdev, HCLGE_TSO_MSS_MIN, HCLGE_TSO_MSS_MAX); | |
7185 | if (ret) { | |
7186 | dev_err(&pdev->dev, "Enable tso fail, ret =%d\n", ret); | |
e0027501 | 7187 | goto err_mdiobus_unreg; |
46a3df9f S |
7188 | } |
7189 | ||
73f88b00 PL |
7190 | ret = hclge_config_gro(hdev, true); |
7191 | if (ret) | |
7192 | goto err_mdiobus_unreg; | |
7193 | ||
46a3df9f S |
7194 | ret = hclge_init_vlan_config(hdev); |
7195 | if (ret) { | |
7196 | dev_err(&pdev->dev, "VLAN init fail, ret =%d\n", ret); | |
e0027501 | 7197 | goto err_mdiobus_unreg; |
46a3df9f S |
7198 | } |
7199 | ||
7200 | ret = hclge_tm_schd_init(hdev); | |
7201 | if (ret) { | |
7202 | dev_err(&pdev->dev, "tm schd init fail, ret =%d\n", ret); | |
e0027501 | 7203 | goto err_mdiobus_unreg; |
68ece54e YL |
7204 | } |
7205 | ||
8015bb74 | 7206 | hclge_rss_init_cfg(hdev); |
68ece54e YL |
7207 | ret = hclge_rss_init_hw(hdev); |
7208 | if (ret) { | |
7209 | dev_err(&pdev->dev, "Rss init fail, ret =%d\n", ret); | |
e0027501 | 7210 | goto err_mdiobus_unreg; |
46a3df9f S |
7211 | } |
7212 | ||
635bfb58 FL |
7213 | ret = init_mgr_tbl(hdev); |
7214 | if (ret) { | |
7215 | dev_err(&pdev->dev, "manager table init fail, ret =%d\n", ret); | |
e0027501 | 7216 | goto err_mdiobus_unreg; |
635bfb58 FL |
7217 | } |
7218 | ||
10a954bc JS |
7219 | ret = hclge_init_fd_config(hdev); |
7220 | if (ret) { | |
7221 | dev_err(&pdev->dev, | |
7222 | "fd table init fail, ret=%d\n", ret); | |
7223 | goto err_mdiobus_unreg; | |
7224 | } | |
7225 | ||
9f53588e SJ |
7226 | ret = hclge_hw_error_set_state(hdev, true); |
7227 | if (ret) { | |
7228 | dev_err(&pdev->dev, | |
7229 | "hw error interrupts enable failed, ret =%d\n", ret); | |
7230 | goto err_mdiobus_unreg; | |
7231 | } | |
7232 | ||
cacde272 YL |
7233 | hclge_dcb_ops_set(hdev); |
7234 | ||
d039ef68 | 7235 | timer_setup(&hdev->service_timer, hclge_service_timer, 0); |
1afdb53a | 7236 | timer_setup(&hdev->reset_timer, hclge_reset_timer, 0); |
46a3df9f | 7237 | INIT_WORK(&hdev->service_task, hclge_service_task); |
ed4a1bb8 | 7238 | INIT_WORK(&hdev->rst_service_task, hclge_reset_service_task); |
22fd3468 | 7239 | INIT_WORK(&hdev->mbx_service_task, hclge_mailbox_service_task); |
46a3df9f | 7240 | |
9ab4ad14 XW |
7241 | hclge_clear_all_event_cause(hdev); |
7242 | ||
466b0c00 L |
7243 | /* Enable MISC vector(vector0) */ |
7244 | hclge_enable_vector(&hdev->misc_vector, true); | |
7245 | ||
2ec3d9f0 | 7246 | hclge_state_init(hdev); |
1a2f7bf2 | 7247 | hdev->last_reset_time = jiffies; |
46a3df9f S |
7248 | |
7249 | pr_info("%s driver initialization finished.\n", HCLGE_DRIVER_NAME); | |
7250 | return 0; | |
7251 | ||
e0027501 HT |
7252 | err_mdiobus_unreg: |
7253 | if (hdev->hw.mac.phydev) | |
7254 | mdiobus_unregister(hdev->hw.mac.mdio_bus); | |
e0027501 HT |
7255 | err_msi_irq_uninit: |
7256 | hclge_misc_irq_uninit(hdev); | |
7257 | err_msi_uninit: | |
7258 | pci_free_irq_vectors(pdev); | |
7259 | err_cmd_uninit: | |
7260 | hclge_destroy_cmd_queue(&hdev->hw); | |
7261 | err_pci_uninit: | |
7d6d639b | 7262 | pcim_iounmap(pdev, hdev->hw.io_base); |
e0027501 | 7263 | pci_clear_master(pdev); |
46a3df9f | 7264 | pci_release_regions(pdev); |
e0027501 | 7265 | pci_disable_device(pdev); |
e0027501 | 7266 | out: |
46a3df9f S |
7267 | return ret; |
7268 | } | |
7269 | ||
c6dc5213 | 7270 | static void hclge_stats_clear(struct hclge_dev *hdev) |
7271 | { | |
7272 | memset(&hdev->hw_stats, 0, sizeof(hdev->hw_stats)); | |
7273 | } | |
7274 | ||
337460de YL |
7275 | static void hclge_reset_vport_state(struct hclge_dev *hdev) |
7276 | { | |
7277 | struct hclge_vport *vport = hdev->vport; | |
7278 | int i; | |
7279 | ||
7280 | for (i = 0; i < hdev->num_alloc_vport; i++) { | |
7281 | hclge_vport_start(vport); | |
7282 | vport++; | |
7283 | } | |
7284 | } | |
7285 | ||
4ed340ab L |
7286 | static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev) |
7287 | { | |
7288 | struct hclge_dev *hdev = ae_dev->priv; | |
7289 | struct pci_dev *pdev = ae_dev->pdev; | |
7290 | int ret; | |
7291 | ||
7292 | set_bit(HCLGE_STATE_DOWN, &hdev->state); | |
7293 | ||
c6dc5213 | 7294 | hclge_stats_clear(hdev); |
4e66632d | 7295 | memset(hdev->vlan_table, 0, sizeof(hdev->vlan_table)); |
c6dc5213 | 7296 | |
4ed340ab L |
7297 | ret = hclge_cmd_init(hdev); |
7298 | if (ret) { | |
7299 | dev_err(&pdev->dev, "Cmd queue init failed\n"); | |
7300 | return ret; | |
7301 | } | |
7302 | ||
7303 | ret = hclge_get_cap(hdev); | |
7304 | if (ret) { | |
7305 | dev_err(&pdev->dev, "get hw capability error, ret = %d.\n", | |
7306 | ret); | |
7307 | return ret; | |
7308 | } | |
7309 | ||
7310 | ret = hclge_configure(hdev); | |
7311 | if (ret) { | |
7312 | dev_err(&pdev->dev, "Configure dev error, ret = %d.\n", ret); | |
7313 | return ret; | |
7314 | } | |
7315 | ||
7316 | ret = hclge_map_tqp(hdev); | |
7317 | if (ret) { | |
7318 | dev_err(&pdev->dev, "Map tqp error, ret = %d.\n", ret); | |
7319 | return ret; | |
7320 | } | |
7321 | ||
2da5ec58 JS |
7322 | hclge_reset_umv_space(hdev); |
7323 | ||
4ed340ab L |
7324 | ret = hclge_mac_init(hdev); |
7325 | if (ret) { | |
7326 | dev_err(&pdev->dev, "Mac init error, ret = %d\n", ret); | |
7327 | return ret; | |
7328 | } | |
7329 | ||
4ed340ab L |
7330 | ret = hclge_config_tso(hdev, HCLGE_TSO_MSS_MIN, HCLGE_TSO_MSS_MAX); |
7331 | if (ret) { | |
7332 | dev_err(&pdev->dev, "Enable tso fail, ret =%d\n", ret); | |
7333 | return ret; | |
7334 | } | |
7335 | ||
73f88b00 PL |
7336 | ret = hclge_config_gro(hdev, true); |
7337 | if (ret) | |
7338 | return ret; | |
7339 | ||
4ed340ab L |
7340 | ret = hclge_init_vlan_config(hdev); |
7341 | if (ret) { | |
7342 | dev_err(&pdev->dev, "VLAN init fail, ret =%d\n", ret); | |
7343 | return ret; | |
7344 | } | |
7345 | ||
d85f1ab5 | 7346 | ret = hclge_tm_init_hw(hdev); |
4ed340ab | 7347 | if (ret) { |
d85f1ab5 | 7348 | dev_err(&pdev->dev, "tm init hw fail, ret =%d\n", ret); |
4ed340ab L |
7349 | return ret; |
7350 | } | |
7351 | ||
7352 | ret = hclge_rss_init_hw(hdev); | |
7353 | if (ret) { | |
7354 | dev_err(&pdev->dev, "Rss init fail, ret =%d\n", ret); | |
7355 | return ret; | |
7356 | } | |
7357 | ||
10a954bc JS |
7358 | ret = hclge_init_fd_config(hdev); |
7359 | if (ret) { | |
7360 | dev_err(&pdev->dev, | |
7361 | "fd table init fail, ret=%d\n", ret); | |
7362 | return ret; | |
7363 | } | |
7364 | ||
78807a3d SJ |
7365 | /* Re-enable the TM hw error interrupts because |
7366 | * they get disabled on core/global reset. | |
7367 | */ | |
7368 | if (hclge_enable_tm_hw_error(hdev, true)) | |
7369 | dev_err(&pdev->dev, "failed to enable TM hw error interrupts\n"); | |
7370 | ||
337460de YL |
7371 | hclge_reset_vport_state(hdev); |
7372 | ||
4ed340ab L |
7373 | dev_info(&pdev->dev, "Reset done, %s driver initialization finished.\n", |
7374 | HCLGE_DRIVER_NAME); | |
7375 | ||
7376 | return 0; | |
7377 | } | |
7378 | ||
46a3df9f S |
7379 | static void hclge_uninit_ae_dev(struct hnae3_ae_dev *ae_dev) |
7380 | { | |
7381 | struct hclge_dev *hdev = ae_dev->priv; | |
7382 | struct hclge_mac *mac = &hdev->hw.mac; | |
7383 | ||
2ec3d9f0 | 7384 | hclge_state_uninit(hdev); |
46a3df9f S |
7385 | |
7386 | if (mac->phydev) | |
7387 | mdiobus_unregister(mac->mdio_bus); | |
7388 | ||
2da5ec58 JS |
7389 | hclge_uninit_umv_space(hdev); |
7390 | ||
466b0c00 L |
7391 | /* Disable MISC vector(vector0) */ |
7392 | hclge_enable_vector(&hdev->misc_vector, false); | |
9ab4ad14 XW |
7393 | synchronize_irq(hdev->misc_vector.vector_irq); |
7394 | ||
9f53588e | 7395 | hclge_hw_error_set_state(hdev, false); |
46a3df9f | 7396 | hclge_destroy_cmd_queue(&hdev->hw); |
202f2014 | 7397 | hclge_misc_irq_uninit(hdev); |
46a3df9f | 7398 | hclge_pci_uninit(hdev); |
b2c04029 | 7399 | mutex_destroy(&hdev->vport_lock); |
46a3df9f S |
7400 | ae_dev->priv = NULL; |
7401 | } | |
7402 | ||
4f645a90 PL |
7403 | static u32 hclge_get_max_channels(struct hnae3_handle *handle) |
7404 | { | |
7405 | struct hnae3_knic_private_info *kinfo = &handle->kinfo; | |
7406 | struct hclge_vport *vport = hclge_get_vport(handle); | |
7407 | struct hclge_dev *hdev = vport->back; | |
7408 | ||
7409 | return min_t(u32, hdev->rss_size_max * kinfo->num_tc, hdev->num_tqps); | |
7410 | } | |
7411 | ||
7412 | static void hclge_get_channels(struct hnae3_handle *handle, | |
7413 | struct ethtool_channels *ch) | |
7414 | { | |
7415 | struct hclge_vport *vport = hclge_get_vport(handle); | |
7416 | ||
7417 | ch->max_combined = hclge_get_max_channels(handle); | |
7418 | ch->other_count = 1; | |
7419 | ch->max_other = 1; | |
7420 | ch->combined_count = vport->alloc_tqps; | |
7421 | } | |
7422 | ||
f1f779ce | 7423 | static void hclge_get_tqps_and_rss_info(struct hnae3_handle *handle, |
08ca3d58 | 7424 | u16 *alloc_tqps, u16 *max_rss_size) |
f1f779ce PL |
7425 | { |
7426 | struct hclge_vport *vport = hclge_get_vport(handle); | |
7427 | struct hclge_dev *hdev = vport->back; | |
f1f779ce | 7428 | |
08ca3d58 | 7429 | *alloc_tqps = vport->alloc_tqps; |
f1f779ce PL |
7430 | *max_rss_size = hdev->rss_size_max; |
7431 | } | |
7432 | ||
7433 | static void hclge_release_tqp(struct hclge_vport *vport) | |
7434 | { | |
7435 | struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo; | |
7436 | struct hclge_dev *hdev = vport->back; | |
7437 | int i; | |
7438 | ||
7439 | for (i = 0; i < kinfo->num_tqps; i++) { | |
7440 | struct hclge_tqp *tqp = | |
7441 | container_of(kinfo->tqp[i], struct hclge_tqp, q); | |
7442 | ||
7443 | tqp->q.handle = NULL; | |
7444 | tqp->q.tqp_index = 0; | |
7445 | tqp->alloced = false; | |
7446 | } | |
7447 | ||
7448 | devm_kfree(&hdev->pdev->dev, kinfo->tqp); | |
7449 | kinfo->tqp = NULL; | |
7450 | } | |
7451 | ||
7452 | static int hclge_set_channels(struct hnae3_handle *handle, u32 new_tqps_num) | |
7453 | { | |
7454 | struct hclge_vport *vport = hclge_get_vport(handle); | |
7455 | struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo; | |
7456 | struct hclge_dev *hdev = vport->back; | |
7457 | int cur_rss_size = kinfo->rss_size; | |
7458 | int cur_tqps = kinfo->num_tqps; | |
7459 | u16 tc_offset[HCLGE_MAX_TC_NUM]; | |
7460 | u16 tc_valid[HCLGE_MAX_TC_NUM]; | |
7461 | u16 tc_size[HCLGE_MAX_TC_NUM]; | |
7462 | u16 roundup_size; | |
7463 | u32 *rss_indir; | |
7464 | int ret, i; | |
7465 | ||
f73c9107 | 7466 | /* Free old tqps, and reallocate with new tqp number when nic setup */ |
f1f779ce PL |
7467 | hclge_release_tqp(vport); |
7468 | ||
81356b1f | 7469 | ret = hclge_knic_setup(vport, new_tqps_num, kinfo->num_desc); |
f1f779ce PL |
7470 | if (ret) { |
7471 | dev_err(&hdev->pdev->dev, "setup nic fail, ret =%d\n", ret); | |
7472 | return ret; | |
7473 | } | |
7474 | ||
7475 | ret = hclge_map_tqp_to_vport(hdev, vport); | |
7476 | if (ret) { | |
7477 | dev_err(&hdev->pdev->dev, "map vport tqp fail, ret =%d\n", ret); | |
7478 | return ret; | |
7479 | } | |
7480 | ||
7481 | ret = hclge_tm_schd_init(hdev); | |
7482 | if (ret) { | |
7483 | dev_err(&hdev->pdev->dev, "tm schd init fail, ret =%d\n", ret); | |
7484 | return ret; | |
7485 | } | |
7486 | ||
7487 | roundup_size = roundup_pow_of_two(kinfo->rss_size); | |
7488 | roundup_size = ilog2(roundup_size); | |
7489 | /* Set the RSS TC mode according to the new RSS size */ | |
7490 | for (i = 0; i < HCLGE_MAX_TC_NUM; i++) { | |
7491 | tc_valid[i] = 0; | |
7492 | ||
7493 | if (!(hdev->hw_tc_map & BIT(i))) | |
7494 | continue; | |
7495 | ||
7496 | tc_valid[i] = 1; | |
7497 | tc_size[i] = roundup_size; | |
7498 | tc_offset[i] = kinfo->rss_size * i; | |
7499 | } | |
7500 | ret = hclge_set_rss_tc_mode(hdev, tc_valid, tc_size, tc_offset); | |
7501 | if (ret) | |
7502 | return ret; | |
7503 | ||
7504 | /* Reinitializes the rss indirect table according to the new RSS size */ | |
7505 | rss_indir = kcalloc(HCLGE_RSS_IND_TBL_SIZE, sizeof(u32), GFP_KERNEL); | |
7506 | if (!rss_indir) | |
7507 | return -ENOMEM; | |
7508 | ||
7509 | for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++) | |
7510 | rss_indir[i] = i % kinfo->rss_size; | |
7511 | ||
7512 | ret = hclge_set_rss(handle, rss_indir, NULL, 0); | |
7513 | if (ret) | |
7514 | dev_err(&hdev->pdev->dev, "set rss indir table fail, ret=%d\n", | |
7515 | ret); | |
7516 | ||
7517 | kfree(rss_indir); | |
7518 | ||
7519 | if (!ret) | |
7520 | dev_info(&hdev->pdev->dev, | |
7521 | "Channels changed, rss_size from %d to %d, tqps from %d to %d", | |
7522 | cur_rss_size, kinfo->rss_size, | |
7523 | cur_tqps, kinfo->rss_size * kinfo->num_tc); | |
7524 | ||
7525 | return ret; | |
7526 | } | |
7527 | ||
db2a3e43 FL |
7528 | static int hclge_get_regs_num(struct hclge_dev *hdev, u32 *regs_num_32_bit, |
7529 | u32 *regs_num_64_bit) | |
7530 | { | |
7531 | struct hclge_desc desc; | |
7532 | u32 total_num; | |
7533 | int ret; | |
7534 | ||
7535 | hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_REG_NUM, true); | |
7536 | ret = hclge_cmd_send(&hdev->hw, &desc, 1); | |
7537 | if (ret) { | |
7538 | dev_err(&hdev->pdev->dev, | |
7539 | "Query register number cmd failed, ret = %d.\n", ret); | |
7540 | return ret; | |
7541 | } | |
7542 | ||
7543 | *regs_num_32_bit = le32_to_cpu(desc.data[0]); | |
7544 | *regs_num_64_bit = le32_to_cpu(desc.data[1]); | |
7545 | ||
7546 | total_num = *regs_num_32_bit + *regs_num_64_bit; | |
7547 | if (!total_num) | |
7548 | return -EINVAL; | |
7549 | ||
7550 | return 0; | |
7551 | } | |
7552 | ||
7553 | static int hclge_get_32_bit_regs(struct hclge_dev *hdev, u32 regs_num, | |
7554 | void *data) | |
7555 | { | |
7556 | #define HCLGE_32_BIT_REG_RTN_DATANUM 8 | |
7557 | ||
7558 | struct hclge_desc *desc; | |
7559 | u32 *reg_val = data; | |
7560 | __le32 *desc_data; | |
7561 | int cmd_num; | |
7562 | int i, k, n; | |
7563 | int ret; | |
7564 | ||
7565 | if (regs_num == 0) | |
7566 | return 0; | |
7567 | ||
7568 | cmd_num = DIV_ROUND_UP(regs_num + 2, HCLGE_32_BIT_REG_RTN_DATANUM); | |
7569 | desc = kcalloc(cmd_num, sizeof(struct hclge_desc), GFP_KERNEL); | |
7570 | if (!desc) | |
7571 | return -ENOMEM; | |
7572 | ||
7573 | hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_32_BIT_REG, true); | |
7574 | ret = hclge_cmd_send(&hdev->hw, desc, cmd_num); | |
7575 | if (ret) { | |
7576 | dev_err(&hdev->pdev->dev, | |
7577 | "Query 32 bit register cmd failed, ret = %d.\n", ret); | |
7578 | kfree(desc); | |
7579 | return ret; | |
7580 | } | |
7581 | ||
7582 | for (i = 0; i < cmd_num; i++) { | |
7583 | if (i == 0) { | |
7584 | desc_data = (__le32 *)(&desc[i].data[0]); | |
7585 | n = HCLGE_32_BIT_REG_RTN_DATANUM - 2; | |
7586 | } else { | |
7587 | desc_data = (__le32 *)(&desc[i]); | |
7588 | n = HCLGE_32_BIT_REG_RTN_DATANUM; | |
7589 | } | |
7590 | for (k = 0; k < n; k++) { | |
7591 | *reg_val++ = le32_to_cpu(*desc_data++); | |
7592 | ||
7593 | regs_num--; | |
7594 | if (!regs_num) | |
7595 | break; | |
7596 | } | |
7597 | } | |
7598 | ||
7599 | kfree(desc); | |
7600 | return 0; | |
7601 | } | |
7602 | ||
7603 | static int hclge_get_64_bit_regs(struct hclge_dev *hdev, u32 regs_num, | |
7604 | void *data) | |
7605 | { | |
7606 | #define HCLGE_64_BIT_REG_RTN_DATANUM 4 | |
7607 | ||
7608 | struct hclge_desc *desc; | |
7609 | u64 *reg_val = data; | |
7610 | __le64 *desc_data; | |
7611 | int cmd_num; | |
7612 | int i, k, n; | |
7613 | int ret; | |
7614 | ||
7615 | if (regs_num == 0) | |
7616 | return 0; | |
7617 | ||
7618 | cmd_num = DIV_ROUND_UP(regs_num + 1, HCLGE_64_BIT_REG_RTN_DATANUM); | |
7619 | desc = kcalloc(cmd_num, sizeof(struct hclge_desc), GFP_KERNEL); | |
7620 | if (!desc) | |
7621 | return -ENOMEM; | |
7622 | ||
7623 | hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_64_BIT_REG, true); | |
7624 | ret = hclge_cmd_send(&hdev->hw, desc, cmd_num); | |
7625 | if (ret) { | |
7626 | dev_err(&hdev->pdev->dev, | |
7627 | "Query 64 bit register cmd failed, ret = %d.\n", ret); | |
7628 | kfree(desc); | |
7629 | return ret; | |
7630 | } | |
7631 | ||
7632 | for (i = 0; i < cmd_num; i++) { | |
7633 | if (i == 0) { | |
7634 | desc_data = (__le64 *)(&desc[i].data[0]); | |
7635 | n = HCLGE_64_BIT_REG_RTN_DATANUM - 1; | |
7636 | } else { | |
7637 | desc_data = (__le64 *)(&desc[i]); | |
7638 | n = HCLGE_64_BIT_REG_RTN_DATANUM; | |
7639 | } | |
7640 | for (k = 0; k < n; k++) { | |
7641 | *reg_val++ = le64_to_cpu(*desc_data++); | |
7642 | ||
7643 | regs_num--; | |
7644 | if (!regs_num) | |
7645 | break; | |
7646 | } | |
7647 | } | |
7648 | ||
7649 | kfree(desc); | |
7650 | return 0; | |
7651 | } | |
7652 | ||
7653 | static int hclge_get_regs_len(struct hnae3_handle *handle) | |
7654 | { | |
7655 | struct hclge_vport *vport = hclge_get_vport(handle); | |
7656 | struct hclge_dev *hdev = vport->back; | |
7657 | u32 regs_num_32_bit, regs_num_64_bit; | |
7658 | int ret; | |
7659 | ||
7660 | ret = hclge_get_regs_num(hdev, ®s_num_32_bit, ®s_num_64_bit); | |
7661 | if (ret) { | |
7662 | dev_err(&hdev->pdev->dev, | |
7663 | "Get register number failed, ret = %d.\n", ret); | |
7664 | return -EOPNOTSUPP; | |
7665 | } | |
7666 | ||
7667 | return regs_num_32_bit * sizeof(u32) + regs_num_64_bit * sizeof(u64); | |
7668 | } | |
7669 | ||
7670 | static void hclge_get_regs(struct hnae3_handle *handle, u32 *version, | |
7671 | void *data) | |
7672 | { | |
7673 | struct hclge_vport *vport = hclge_get_vport(handle); | |
7674 | struct hclge_dev *hdev = vport->back; | |
7675 | u32 regs_num_32_bit, regs_num_64_bit; | |
7676 | int ret; | |
7677 | ||
7678 | *version = hdev->fw_version; | |
7679 | ||
7680 | ret = hclge_get_regs_num(hdev, ®s_num_32_bit, ®s_num_64_bit); | |
7681 | if (ret) { | |
7682 | dev_err(&hdev->pdev->dev, | |
7683 | "Get register number failed, ret = %d.\n", ret); | |
7684 | return; | |
7685 | } | |
7686 | ||
7687 | ret = hclge_get_32_bit_regs(hdev, regs_num_32_bit, data); | |
7688 | if (ret) { | |
7689 | dev_err(&hdev->pdev->dev, | |
7690 | "Get 32 bit register failed, ret = %d.\n", ret); | |
7691 | return; | |
7692 | } | |
7693 | ||
7694 | data = (u32 *)data + regs_num_32_bit; | |
7695 | ret = hclge_get_64_bit_regs(hdev, regs_num_64_bit, | |
7696 | data); | |
7697 | if (ret) | |
7698 | dev_err(&hdev->pdev->dev, | |
7699 | "Get 64 bit register failed, ret = %d.\n", ret); | |
7700 | } | |
7701 | ||
fe3a3e15 | 7702 | static int hclge_set_led_status(struct hclge_dev *hdev, u8 locate_led_status) |
d9a0884e JS |
7703 | { |
7704 | struct hclge_set_led_state_cmd *req; | |
7705 | struct hclge_desc desc; | |
7706 | int ret; | |
7707 | ||
7708 | hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_LED_STATUS_CFG, false); | |
7709 | ||
7710 | req = (struct hclge_set_led_state_cmd *)desc.data; | |
ccc23ef3 PL |
7711 | hnae3_set_field(req->locate_led_config, HCLGE_LED_LOCATE_STATE_M, |
7712 | HCLGE_LED_LOCATE_STATE_S, locate_led_status); | |
d9a0884e JS |
7713 | |
7714 | ret = hclge_cmd_send(&hdev->hw, &desc, 1); | |
7715 | if (ret) | |
7716 | dev_err(&hdev->pdev->dev, | |
7717 | "Send set led state cmd error, ret =%d\n", ret); | |
7718 | ||
7719 | return ret; | |
7720 | } | |
7721 | ||
7722 | enum hclge_led_status { | |
7723 | HCLGE_LED_OFF, | |
7724 | HCLGE_LED_ON, | |
7725 | HCLGE_LED_NO_CHANGE = 0xFF, | |
7726 | }; | |
7727 | ||
7728 | static int hclge_set_led_id(struct hnae3_handle *handle, | |
7729 | enum ethtool_phys_id_state status) | |
7730 | { | |
d9a0884e JS |
7731 | struct hclge_vport *vport = hclge_get_vport(handle); |
7732 | struct hclge_dev *hdev = vport->back; | |
d9a0884e JS |
7733 | |
7734 | switch (status) { | |
7735 | case ETHTOOL_ID_ACTIVE: | |
fe3a3e15 | 7736 | return hclge_set_led_status(hdev, HCLGE_LED_ON); |
d9a0884e | 7737 | case ETHTOOL_ID_INACTIVE: |
fe3a3e15 | 7738 | return hclge_set_led_status(hdev, HCLGE_LED_OFF); |
d9a0884e | 7739 | default: |
fe3a3e15 | 7740 | return -EINVAL; |
d9a0884e | 7741 | } |
d9a0884e JS |
7742 | } |
7743 | ||
d92ceae9 FL |
7744 | static void hclge_get_link_mode(struct hnae3_handle *handle, |
7745 | unsigned long *supported, | |
7746 | unsigned long *advertising) | |
7747 | { | |
7748 | unsigned int size = BITS_TO_LONGS(__ETHTOOL_LINK_MODE_MASK_NBITS); | |
7749 | struct hclge_vport *vport = hclge_get_vport(handle); | |
7750 | struct hclge_dev *hdev = vport->back; | |
7751 | unsigned int idx = 0; | |
7752 | ||
7753 | for (; idx < size; idx++) { | |
7754 | supported[idx] = hdev->hw.mac.supported[idx]; | |
7755 | advertising[idx] = hdev->hw.mac.advertising[idx]; | |
7756 | } | |
7757 | } | |
7758 | ||
46a3df9f S |
7759 | static const struct hnae3_ae_ops hclge_ops = { |
7760 | .init_ae_dev = hclge_init_ae_dev, | |
7761 | .uninit_ae_dev = hclge_uninit_ae_dev, | |
26977990 HT |
7762 | .flr_prepare = hclge_flr_prepare, |
7763 | .flr_done = hclge_flr_done, | |
46a3df9f S |
7764 | .init_client_instance = hclge_init_client_instance, |
7765 | .uninit_client_instance = hclge_uninit_client_instance, | |
63d7e66f SM |
7766 | .map_ring_to_vector = hclge_map_ring_to_vector, |
7767 | .unmap_ring_from_vector = hclge_unmap_ring_frm_vector, | |
46a3df9f | 7768 | .get_vector = hclge_get_vector, |
7412200c | 7769 | .put_vector = hclge_put_vector, |
46a3df9f | 7770 | .set_promisc_mode = hclge_set_promisc_mode, |
c39c4d98 | 7771 | .set_loopback = hclge_set_loopback, |
46a3df9f S |
7772 | .start = hclge_ae_start, |
7773 | .stop = hclge_ae_stop, | |
337460de YL |
7774 | .client_start = hclge_client_start, |
7775 | .client_stop = hclge_client_stop, | |
46a3df9f S |
7776 | .get_status = hclge_get_status, |
7777 | .get_ksettings_an_result = hclge_get_ksettings_an_result, | |
7778 | .update_speed_duplex_h = hclge_update_speed_duplex_h, | |
7779 | .cfg_mac_speed_dup_h = hclge_cfg_mac_speed_dup_h, | |
7780 | .get_media_type = hclge_get_media_type, | |
7781 | .get_rss_key_size = hclge_get_rss_key_size, | |
7782 | .get_rss_indir_size = hclge_get_rss_indir_size, | |
7783 | .get_rss = hclge_get_rss, | |
7784 | .set_rss = hclge_set_rss, | |
f7db940a | 7785 | .set_rss_tuple = hclge_set_rss_tuple, |
07d29954 | 7786 | .get_rss_tuple = hclge_get_rss_tuple, |
46a3df9f S |
7787 | .get_tc_size = hclge_get_tc_size, |
7788 | .get_mac_addr = hclge_get_mac_addr, | |
7789 | .set_mac_addr = hclge_set_mac_addr, | |
a185d723 | 7790 | .do_ioctl = hclge_do_ioctl, |
46a3df9f S |
7791 | .add_uc_addr = hclge_add_uc_addr, |
7792 | .rm_uc_addr = hclge_rm_uc_addr, | |
7793 | .add_mc_addr = hclge_add_mc_addr, | |
7794 | .rm_mc_addr = hclge_rm_mc_addr, | |
7795 | .set_autoneg = hclge_set_autoneg, | |
7796 | .get_autoneg = hclge_get_autoneg, | |
7797 | .get_pauseparam = hclge_get_pauseparam, | |
09ea401e | 7798 | .set_pauseparam = hclge_set_pauseparam, |
46a3df9f S |
7799 | .set_mtu = hclge_set_mtu, |
7800 | .reset_queue = hclge_reset_tqp, | |
7801 | .get_stats = hclge_get_stats, | |
7802 | .update_stats = hclge_update_stats, | |
7803 | .get_strings = hclge_get_strings, | |
7804 | .get_sset_count = hclge_get_sset_count, | |
7805 | .get_fw_version = hclge_get_fw_version, | |
7806 | .get_mdix_mode = hclge_get_mdix_mode, | |
d818396d | 7807 | .enable_vlan_filter = hclge_enable_vlan_filter, |
4e66632d | 7808 | .set_vlan_filter = hclge_set_vlan_filter, |
46a3df9f | 7809 | .set_vf_vlan_filter = hclge_set_vf_vlan_filter, |
5f9a7732 | 7810 | .enable_hw_strip_rxvtag = hclge_en_hw_strip_rxvtag, |
4ed340ab | 7811 | .reset_event = hclge_reset_event, |
2c883d73 | 7812 | .set_default_reset_request = hclge_set_def_reset_request, |
f1f779ce PL |
7813 | .get_tqps_and_rss_info = hclge_get_tqps_and_rss_info, |
7814 | .set_channels = hclge_set_channels, | |
4f645a90 | 7815 | .get_channels = hclge_get_channels, |
db2a3e43 FL |
7816 | .get_regs_len = hclge_get_regs_len, |
7817 | .get_regs = hclge_get_regs, | |
d9a0884e | 7818 | .set_led_id = hclge_set_led_id, |
d92ceae9 | 7819 | .get_link_mode = hclge_get_link_mode, |
3ca8e27c JS |
7820 | .add_fd_entry = hclge_add_fd_entry, |
7821 | .del_fd_entry = hclge_del_fd_entry, | |
7ce98982 | 7822 | .del_all_fd_entries = hclge_del_all_fd_entries, |
295043a7 JS |
7823 | .get_fd_rule_cnt = hclge_get_fd_rule_cnt, |
7824 | .get_fd_rule_info = hclge_get_fd_rule_info, | |
7825 | .get_fd_all_rules = hclge_get_all_rules, | |
7ce98982 | 7826 | .restore_fd_rules = hclge_restore_fd_entries, |
d1f04a80 | 7827 | .enable_fd = hclge_enable_fd, |
00bb612a | 7828 | .process_hw_error = hclge_process_ras_hw_error, |
225c02eb HT |
7829 | .get_hw_reset_stat = hclge_get_hw_reset_stat, |
7830 | .ae_dev_resetting = hclge_ae_dev_resetting, | |
7831 | .ae_dev_reset_cnt = hclge_ae_dev_reset_cnt, | |
46a3df9f S |
7832 | }; |
7833 | ||
7834 | static struct hnae3_ae_algo ae_algo = { | |
7835 | .ops = &hclge_ops, | |
46a3df9f S |
7836 | .pdev_id_table = ae_algo_pci_tbl, |
7837 | }; | |
7838 | ||
7839 | static int hclge_init(void) | |
7840 | { | |
7841 | pr_info("%s is initializing\n", HCLGE_NAME); | |
7842 | ||
a4d090cc FL |
7843 | hnae3_register_ae_algo(&ae_algo); |
7844 | ||
7845 | return 0; | |
46a3df9f S |
7846 | } |
7847 | ||
7848 | static void hclge_exit(void) | |
7849 | { | |
7850 | hnae3_unregister_ae_algo(&ae_algo); | |
7851 | } | |
7852 | module_init(hclge_init); | |
7853 | module_exit(hclge_exit); | |
7854 | ||
7855 | MODULE_LICENSE("GPL"); | |
7856 | MODULE_AUTHOR("Huawei Tech. Co., Ltd."); | |
7857 | MODULE_DESCRIPTION("HCLGE Driver"); | |
7858 | MODULE_VERSION(HCLGE_MOD_VERSION); |