]>
Commit | Line | Data |
---|---|---|
46a3df9f S |
1 | /* |
2 | * Copyright (c) 2016-2017 Hisilicon Limited. | |
3 | * | |
4 | * This program is free software; you can redistribute it and/or modify | |
5 | * it under the terms of the GNU General Public License as published by | |
6 | * the Free Software Foundation; either version 2 of the License, or | |
7 | * (at your option) any later version. | |
8 | */ | |
9 | ||
10 | #include <linux/acpi.h> | |
11 | #include <linux/device.h> | |
12 | #include <linux/etherdevice.h> | |
13 | #include <linux/init.h> | |
14 | #include <linux/interrupt.h> | |
15 | #include <linux/kernel.h> | |
16 | #include <linux/module.h> | |
17 | #include <linux/netdevice.h> | |
18 | #include <linux/pci.h> | |
19 | #include <linux/platform_device.h> | |
d5752031 | 20 | #include <net/rtnetlink.h> |
46a3df9f | 21 | #include "hclge_cmd.h" |
cacde272 | 22 | #include "hclge_dcb.h" |
46a3df9f | 23 | #include "hclge_main.h" |
0cdbdd3e | 24 | #include "hclge_mbx.h" |
46a3df9f S |
25 | #include "hclge_mdio.h" |
26 | #include "hclge_tm.h" | |
27 | #include "hnae3.h" | |
28 | ||
29 | #define HCLGE_NAME "hclge" | |
30 | #define HCLGE_STATS_READ(p, offset) (*((u64 *)((u8 *)(p) + (offset)))) | |
31 | #define HCLGE_MAC_STATS_FIELD_OFF(f) (offsetof(struct hclge_mac_stats, f)) | |
32 | #define HCLGE_64BIT_STATS_FIELD_OFF(f) (offsetof(struct hclge_64_bit_stats, f)) | |
33 | #define HCLGE_32BIT_STATS_FIELD_OFF(f) (offsetof(struct hclge_32_bit_stats, f)) | |
34 | ||
46a3df9f S |
35 | static int hclge_set_mta_filter_mode(struct hclge_dev *hdev, |
36 | enum hclge_mta_dmac_sel_type mta_mac_sel, | |
37 | bool enable); | |
38 | static int hclge_init_vlan_config(struct hclge_dev *hdev); | |
4ed340ab | 39 | static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev); |
46a3df9f S |
40 | |
41 | static struct hnae3_ae_algo ae_algo; | |
42 | ||
43 | static const struct pci_device_id ae_algo_pci_tbl[] = { | |
44 | {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_GE), 0}, | |
45 | {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE), 0}, | |
46 | {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA), 0}, | |
47 | {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA_MACSEC), 0}, | |
48 | {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA), 0}, | |
49 | {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA_MACSEC), 0}, | |
50 | {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_MACSEC), 0}, | |
e92a0843 | 51 | /* required last entry */ |
46a3df9f S |
52 | {0, } |
53 | }; | |
54 | ||
55 | static const char hns3_nic_test_strs[][ETH_GSTRING_LEN] = { | |
56 | "Mac Loopback test", | |
57 | "Serdes Loopback test", | |
58 | "Phy Loopback test" | |
59 | }; | |
60 | ||
61 | static const struct hclge_comm_stats_str g_all_64bit_stats_string[] = { | |
62 | {"igu_rx_oversize_pkt", | |
63 | HCLGE_64BIT_STATS_FIELD_OFF(igu_rx_oversize_pkt)}, | |
64 | {"igu_rx_undersize_pkt", | |
65 | HCLGE_64BIT_STATS_FIELD_OFF(igu_rx_undersize_pkt)}, | |
66 | {"igu_rx_out_all_pkt", | |
67 | HCLGE_64BIT_STATS_FIELD_OFF(igu_rx_out_all_pkt)}, | |
68 | {"igu_rx_uni_pkt", | |
69 | HCLGE_64BIT_STATS_FIELD_OFF(igu_rx_uni_pkt)}, | |
70 | {"igu_rx_multi_pkt", | |
71 | HCLGE_64BIT_STATS_FIELD_OFF(igu_rx_multi_pkt)}, | |
72 | {"igu_rx_broad_pkt", | |
73 | HCLGE_64BIT_STATS_FIELD_OFF(igu_rx_broad_pkt)}, | |
74 | {"egu_tx_out_all_pkt", | |
75 | HCLGE_64BIT_STATS_FIELD_OFF(egu_tx_out_all_pkt)}, | |
76 | {"egu_tx_uni_pkt", | |
77 | HCLGE_64BIT_STATS_FIELD_OFF(egu_tx_uni_pkt)}, | |
78 | {"egu_tx_multi_pkt", | |
79 | HCLGE_64BIT_STATS_FIELD_OFF(egu_tx_multi_pkt)}, | |
80 | {"egu_tx_broad_pkt", | |
81 | HCLGE_64BIT_STATS_FIELD_OFF(egu_tx_broad_pkt)}, | |
82 | {"ssu_ppp_mac_key_num", | |
83 | HCLGE_64BIT_STATS_FIELD_OFF(ssu_ppp_mac_key_num)}, | |
84 | {"ssu_ppp_host_key_num", | |
85 | HCLGE_64BIT_STATS_FIELD_OFF(ssu_ppp_host_key_num)}, | |
86 | {"ppp_ssu_mac_rlt_num", | |
87 | HCLGE_64BIT_STATS_FIELD_OFF(ppp_ssu_mac_rlt_num)}, | |
88 | {"ppp_ssu_host_rlt_num", | |
89 | HCLGE_64BIT_STATS_FIELD_OFF(ppp_ssu_host_rlt_num)}, | |
90 | {"ssu_tx_in_num", | |
91 | HCLGE_64BIT_STATS_FIELD_OFF(ssu_tx_in_num)}, | |
92 | {"ssu_tx_out_num", | |
93 | HCLGE_64BIT_STATS_FIELD_OFF(ssu_tx_out_num)}, | |
94 | {"ssu_rx_in_num", | |
95 | HCLGE_64BIT_STATS_FIELD_OFF(ssu_rx_in_num)}, | |
96 | {"ssu_rx_out_num", | |
97 | HCLGE_64BIT_STATS_FIELD_OFF(ssu_rx_out_num)} | |
98 | }; | |
99 | ||
100 | static const struct hclge_comm_stats_str g_all_32bit_stats_string[] = { | |
101 | {"igu_rx_err_pkt", | |
102 | HCLGE_32BIT_STATS_FIELD_OFF(igu_rx_err_pkt)}, | |
103 | {"igu_rx_no_eof_pkt", | |
104 | HCLGE_32BIT_STATS_FIELD_OFF(igu_rx_no_eof_pkt)}, | |
105 | {"igu_rx_no_sof_pkt", | |
106 | HCLGE_32BIT_STATS_FIELD_OFF(igu_rx_no_sof_pkt)}, | |
107 | {"egu_tx_1588_pkt", | |
108 | HCLGE_32BIT_STATS_FIELD_OFF(egu_tx_1588_pkt)}, | |
109 | {"ssu_full_drop_num", | |
110 | HCLGE_32BIT_STATS_FIELD_OFF(ssu_full_drop_num)}, | |
111 | {"ssu_part_drop_num", | |
112 | HCLGE_32BIT_STATS_FIELD_OFF(ssu_part_drop_num)}, | |
113 | {"ppp_key_drop_num", | |
114 | HCLGE_32BIT_STATS_FIELD_OFF(ppp_key_drop_num)}, | |
115 | {"ppp_rlt_drop_num", | |
116 | HCLGE_32BIT_STATS_FIELD_OFF(ppp_rlt_drop_num)}, | |
117 | {"ssu_key_drop_num", | |
118 | HCLGE_32BIT_STATS_FIELD_OFF(ssu_key_drop_num)}, | |
119 | {"pkt_curr_buf_cnt", | |
120 | HCLGE_32BIT_STATS_FIELD_OFF(pkt_curr_buf_cnt)}, | |
121 | {"qcn_fb_rcv_cnt", | |
122 | HCLGE_32BIT_STATS_FIELD_OFF(qcn_fb_rcv_cnt)}, | |
123 | {"qcn_fb_drop_cnt", | |
124 | HCLGE_32BIT_STATS_FIELD_OFF(qcn_fb_drop_cnt)}, | |
125 | {"qcn_fb_invaild_cnt", | |
126 | HCLGE_32BIT_STATS_FIELD_OFF(qcn_fb_invaild_cnt)}, | |
127 | {"rx_packet_tc0_in_cnt", | |
128 | HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc0_in_cnt)}, | |
129 | {"rx_packet_tc1_in_cnt", | |
130 | HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc1_in_cnt)}, | |
131 | {"rx_packet_tc2_in_cnt", | |
132 | HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc2_in_cnt)}, | |
133 | {"rx_packet_tc3_in_cnt", | |
134 | HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc3_in_cnt)}, | |
135 | {"rx_packet_tc4_in_cnt", | |
136 | HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc4_in_cnt)}, | |
137 | {"rx_packet_tc5_in_cnt", | |
138 | HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc5_in_cnt)}, | |
139 | {"rx_packet_tc6_in_cnt", | |
140 | HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc6_in_cnt)}, | |
141 | {"rx_packet_tc7_in_cnt", | |
142 | HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc7_in_cnt)}, | |
143 | {"rx_packet_tc0_out_cnt", | |
144 | HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc0_out_cnt)}, | |
145 | {"rx_packet_tc1_out_cnt", | |
146 | HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc1_out_cnt)}, | |
147 | {"rx_packet_tc2_out_cnt", | |
148 | HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc2_out_cnt)}, | |
149 | {"rx_packet_tc3_out_cnt", | |
150 | HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc3_out_cnt)}, | |
151 | {"rx_packet_tc4_out_cnt", | |
152 | HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc4_out_cnt)}, | |
153 | {"rx_packet_tc5_out_cnt", | |
154 | HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc5_out_cnt)}, | |
155 | {"rx_packet_tc6_out_cnt", | |
156 | HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc6_out_cnt)}, | |
157 | {"rx_packet_tc7_out_cnt", | |
158 | HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc7_out_cnt)}, | |
159 | {"tx_packet_tc0_in_cnt", | |
160 | HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc0_in_cnt)}, | |
161 | {"tx_packet_tc1_in_cnt", | |
162 | HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc1_in_cnt)}, | |
163 | {"tx_packet_tc2_in_cnt", | |
164 | HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc2_in_cnt)}, | |
165 | {"tx_packet_tc3_in_cnt", | |
166 | HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc3_in_cnt)}, | |
167 | {"tx_packet_tc4_in_cnt", | |
168 | HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc4_in_cnt)}, | |
169 | {"tx_packet_tc5_in_cnt", | |
170 | HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc5_in_cnt)}, | |
171 | {"tx_packet_tc6_in_cnt", | |
172 | HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc6_in_cnt)}, | |
173 | {"tx_packet_tc7_in_cnt", | |
174 | HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc7_in_cnt)}, | |
175 | {"tx_packet_tc0_out_cnt", | |
176 | HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc0_out_cnt)}, | |
177 | {"tx_packet_tc1_out_cnt", | |
178 | HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc1_out_cnt)}, | |
179 | {"tx_packet_tc2_out_cnt", | |
180 | HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc2_out_cnt)}, | |
181 | {"tx_packet_tc3_out_cnt", | |
182 | HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc3_out_cnt)}, | |
183 | {"tx_packet_tc4_out_cnt", | |
184 | HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc4_out_cnt)}, | |
185 | {"tx_packet_tc5_out_cnt", | |
186 | HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc5_out_cnt)}, | |
187 | {"tx_packet_tc6_out_cnt", | |
188 | HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc6_out_cnt)}, | |
189 | {"tx_packet_tc7_out_cnt", | |
190 | HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc7_out_cnt)}, | |
191 | {"pkt_curr_buf_tc0_cnt", | |
192 | HCLGE_32BIT_STATS_FIELD_OFF(pkt_curr_buf_tc0_cnt)}, | |
193 | {"pkt_curr_buf_tc1_cnt", | |
194 | HCLGE_32BIT_STATS_FIELD_OFF(pkt_curr_buf_tc1_cnt)}, | |
195 | {"pkt_curr_buf_tc2_cnt", | |
196 | HCLGE_32BIT_STATS_FIELD_OFF(pkt_curr_buf_tc2_cnt)}, | |
197 | {"pkt_curr_buf_tc3_cnt", | |
198 | HCLGE_32BIT_STATS_FIELD_OFF(pkt_curr_buf_tc3_cnt)}, | |
199 | {"pkt_curr_buf_tc4_cnt", | |
200 | HCLGE_32BIT_STATS_FIELD_OFF(pkt_curr_buf_tc4_cnt)}, | |
201 | {"pkt_curr_buf_tc5_cnt", | |
202 | HCLGE_32BIT_STATS_FIELD_OFF(pkt_curr_buf_tc5_cnt)}, | |
203 | {"pkt_curr_buf_tc6_cnt", | |
204 | HCLGE_32BIT_STATS_FIELD_OFF(pkt_curr_buf_tc6_cnt)}, | |
205 | {"pkt_curr_buf_tc7_cnt", | |
206 | HCLGE_32BIT_STATS_FIELD_OFF(pkt_curr_buf_tc7_cnt)}, | |
207 | {"mb_uncopy_num", | |
208 | HCLGE_32BIT_STATS_FIELD_OFF(mb_uncopy_num)}, | |
209 | {"lo_pri_unicast_rlt_drop_num", | |
210 | HCLGE_32BIT_STATS_FIELD_OFF(lo_pri_unicast_rlt_drop_num)}, | |
211 | {"hi_pri_multicast_rlt_drop_num", | |
212 | HCLGE_32BIT_STATS_FIELD_OFF(hi_pri_multicast_rlt_drop_num)}, | |
213 | {"lo_pri_multicast_rlt_drop_num", | |
214 | HCLGE_32BIT_STATS_FIELD_OFF(lo_pri_multicast_rlt_drop_num)}, | |
215 | {"rx_oq_drop_pkt_cnt", | |
216 | HCLGE_32BIT_STATS_FIELD_OFF(rx_oq_drop_pkt_cnt)}, | |
217 | {"tx_oq_drop_pkt_cnt", | |
218 | HCLGE_32BIT_STATS_FIELD_OFF(tx_oq_drop_pkt_cnt)}, | |
219 | {"nic_l2_err_drop_pkt_cnt", | |
220 | HCLGE_32BIT_STATS_FIELD_OFF(nic_l2_err_drop_pkt_cnt)}, | |
221 | {"roc_l2_err_drop_pkt_cnt", | |
222 | HCLGE_32BIT_STATS_FIELD_OFF(roc_l2_err_drop_pkt_cnt)} | |
223 | }; | |
224 | ||
225 | static const struct hclge_comm_stats_str g_mac_stats_string[] = { | |
226 | {"mac_tx_mac_pause_num", | |
227 | HCLGE_MAC_STATS_FIELD_OFF(mac_tx_mac_pause_num)}, | |
228 | {"mac_rx_mac_pause_num", | |
229 | HCLGE_MAC_STATS_FIELD_OFF(mac_rx_mac_pause_num)}, | |
230 | {"mac_tx_pfc_pri0_pkt_num", | |
231 | HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri0_pkt_num)}, | |
232 | {"mac_tx_pfc_pri1_pkt_num", | |
233 | HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri1_pkt_num)}, | |
234 | {"mac_tx_pfc_pri2_pkt_num", | |
235 | HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri2_pkt_num)}, | |
236 | {"mac_tx_pfc_pri3_pkt_num", | |
237 | HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri3_pkt_num)}, | |
238 | {"mac_tx_pfc_pri4_pkt_num", | |
239 | HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri4_pkt_num)}, | |
240 | {"mac_tx_pfc_pri5_pkt_num", | |
241 | HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri5_pkt_num)}, | |
242 | {"mac_tx_pfc_pri6_pkt_num", | |
243 | HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri6_pkt_num)}, | |
244 | {"mac_tx_pfc_pri7_pkt_num", | |
245 | HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri7_pkt_num)}, | |
246 | {"mac_rx_pfc_pri0_pkt_num", | |
247 | HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri0_pkt_num)}, | |
248 | {"mac_rx_pfc_pri1_pkt_num", | |
249 | HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri1_pkt_num)}, | |
250 | {"mac_rx_pfc_pri2_pkt_num", | |
251 | HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri2_pkt_num)}, | |
252 | {"mac_rx_pfc_pri3_pkt_num", | |
253 | HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri3_pkt_num)}, | |
254 | {"mac_rx_pfc_pri4_pkt_num", | |
255 | HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri4_pkt_num)}, | |
256 | {"mac_rx_pfc_pri5_pkt_num", | |
257 | HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri5_pkt_num)}, | |
258 | {"mac_rx_pfc_pri6_pkt_num", | |
259 | HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri6_pkt_num)}, | |
260 | {"mac_rx_pfc_pri7_pkt_num", | |
261 | HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri7_pkt_num)}, | |
262 | {"mac_tx_total_pkt_num", | |
263 | HCLGE_MAC_STATS_FIELD_OFF(mac_tx_total_pkt_num)}, | |
264 | {"mac_tx_total_oct_num", | |
265 | HCLGE_MAC_STATS_FIELD_OFF(mac_tx_total_oct_num)}, | |
266 | {"mac_tx_good_pkt_num", | |
267 | HCLGE_MAC_STATS_FIELD_OFF(mac_tx_good_pkt_num)}, | |
268 | {"mac_tx_bad_pkt_num", | |
269 | HCLGE_MAC_STATS_FIELD_OFF(mac_tx_bad_pkt_num)}, | |
270 | {"mac_tx_good_oct_num", | |
271 | HCLGE_MAC_STATS_FIELD_OFF(mac_tx_good_oct_num)}, | |
272 | {"mac_tx_bad_oct_num", | |
273 | HCLGE_MAC_STATS_FIELD_OFF(mac_tx_bad_oct_num)}, | |
274 | {"mac_tx_uni_pkt_num", | |
275 | HCLGE_MAC_STATS_FIELD_OFF(mac_tx_uni_pkt_num)}, | |
276 | {"mac_tx_multi_pkt_num", | |
277 | HCLGE_MAC_STATS_FIELD_OFF(mac_tx_multi_pkt_num)}, | |
278 | {"mac_tx_broad_pkt_num", | |
279 | HCLGE_MAC_STATS_FIELD_OFF(mac_tx_broad_pkt_num)}, | |
280 | {"mac_tx_undersize_pkt_num", | |
281 | HCLGE_MAC_STATS_FIELD_OFF(mac_tx_undersize_pkt_num)}, | |
282 | {"mac_tx_overrsize_pkt_num", | |
283 | HCLGE_MAC_STATS_FIELD_OFF(mac_tx_overrsize_pkt_num)}, | |
284 | {"mac_tx_64_oct_pkt_num", | |
285 | HCLGE_MAC_STATS_FIELD_OFF(mac_tx_64_oct_pkt_num)}, | |
286 | {"mac_tx_65_127_oct_pkt_num", | |
287 | HCLGE_MAC_STATS_FIELD_OFF(mac_tx_65_127_oct_pkt_num)}, | |
288 | {"mac_tx_128_255_oct_pkt_num", | |
289 | HCLGE_MAC_STATS_FIELD_OFF(mac_tx_128_255_oct_pkt_num)}, | |
290 | {"mac_tx_256_511_oct_pkt_num", | |
291 | HCLGE_MAC_STATS_FIELD_OFF(mac_tx_256_511_oct_pkt_num)}, | |
292 | {"mac_tx_512_1023_oct_pkt_num", | |
293 | HCLGE_MAC_STATS_FIELD_OFF(mac_tx_512_1023_oct_pkt_num)}, | |
294 | {"mac_tx_1024_1518_oct_pkt_num", | |
295 | HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1024_1518_oct_pkt_num)}, | |
296 | {"mac_tx_1519_max_oct_pkt_num", | |
297 | HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_max_oct_pkt_num)}, | |
298 | {"mac_rx_total_pkt_num", | |
299 | HCLGE_MAC_STATS_FIELD_OFF(mac_rx_total_pkt_num)}, | |
300 | {"mac_rx_total_oct_num", | |
301 | HCLGE_MAC_STATS_FIELD_OFF(mac_rx_total_oct_num)}, | |
302 | {"mac_rx_good_pkt_num", | |
303 | HCLGE_MAC_STATS_FIELD_OFF(mac_rx_good_pkt_num)}, | |
304 | {"mac_rx_bad_pkt_num", | |
305 | HCLGE_MAC_STATS_FIELD_OFF(mac_rx_bad_pkt_num)}, | |
306 | {"mac_rx_good_oct_num", | |
307 | HCLGE_MAC_STATS_FIELD_OFF(mac_rx_good_oct_num)}, | |
308 | {"mac_rx_bad_oct_num", | |
309 | HCLGE_MAC_STATS_FIELD_OFF(mac_rx_bad_oct_num)}, | |
310 | {"mac_rx_uni_pkt_num", | |
311 | HCLGE_MAC_STATS_FIELD_OFF(mac_rx_uni_pkt_num)}, | |
312 | {"mac_rx_multi_pkt_num", | |
313 | HCLGE_MAC_STATS_FIELD_OFF(mac_rx_multi_pkt_num)}, | |
314 | {"mac_rx_broad_pkt_num", | |
315 | HCLGE_MAC_STATS_FIELD_OFF(mac_rx_broad_pkt_num)}, | |
316 | {"mac_rx_undersize_pkt_num", | |
317 | HCLGE_MAC_STATS_FIELD_OFF(mac_rx_undersize_pkt_num)}, | |
318 | {"mac_rx_overrsize_pkt_num", | |
319 | HCLGE_MAC_STATS_FIELD_OFF(mac_rx_overrsize_pkt_num)}, | |
320 | {"mac_rx_64_oct_pkt_num", | |
321 | HCLGE_MAC_STATS_FIELD_OFF(mac_rx_64_oct_pkt_num)}, | |
322 | {"mac_rx_65_127_oct_pkt_num", | |
323 | HCLGE_MAC_STATS_FIELD_OFF(mac_rx_65_127_oct_pkt_num)}, | |
324 | {"mac_rx_128_255_oct_pkt_num", | |
325 | HCLGE_MAC_STATS_FIELD_OFF(mac_rx_128_255_oct_pkt_num)}, | |
326 | {"mac_rx_256_511_oct_pkt_num", | |
327 | HCLGE_MAC_STATS_FIELD_OFF(mac_rx_256_511_oct_pkt_num)}, | |
328 | {"mac_rx_512_1023_oct_pkt_num", | |
329 | HCLGE_MAC_STATS_FIELD_OFF(mac_rx_512_1023_oct_pkt_num)}, | |
330 | {"mac_rx_1024_1518_oct_pkt_num", | |
331 | HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1024_1518_oct_pkt_num)}, | |
332 | {"mac_rx_1519_max_oct_pkt_num", | |
333 | HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_max_oct_pkt_num)}, | |
334 | ||
335 | {"mac_trans_fragment_pkt_num", | |
336 | HCLGE_MAC_STATS_FIELD_OFF(mac_trans_fragment_pkt_num)}, | |
337 | {"mac_trans_undermin_pkt_num", | |
338 | HCLGE_MAC_STATS_FIELD_OFF(mac_trans_undermin_pkt_num)}, | |
339 | {"mac_trans_jabber_pkt_num", | |
340 | HCLGE_MAC_STATS_FIELD_OFF(mac_trans_jabber_pkt_num)}, | |
341 | {"mac_trans_err_all_pkt_num", | |
342 | HCLGE_MAC_STATS_FIELD_OFF(mac_trans_err_all_pkt_num)}, | |
343 | {"mac_trans_from_app_good_pkt_num", | |
344 | HCLGE_MAC_STATS_FIELD_OFF(mac_trans_from_app_good_pkt_num)}, | |
345 | {"mac_trans_from_app_bad_pkt_num", | |
346 | HCLGE_MAC_STATS_FIELD_OFF(mac_trans_from_app_bad_pkt_num)}, | |
347 | {"mac_rcv_fragment_pkt_num", | |
348 | HCLGE_MAC_STATS_FIELD_OFF(mac_rcv_fragment_pkt_num)}, | |
349 | {"mac_rcv_undermin_pkt_num", | |
350 | HCLGE_MAC_STATS_FIELD_OFF(mac_rcv_undermin_pkt_num)}, | |
351 | {"mac_rcv_jabber_pkt_num", | |
352 | HCLGE_MAC_STATS_FIELD_OFF(mac_rcv_jabber_pkt_num)}, | |
353 | {"mac_rcv_fcs_err_pkt_num", | |
354 | HCLGE_MAC_STATS_FIELD_OFF(mac_rcv_fcs_err_pkt_num)}, | |
355 | {"mac_rcv_send_app_good_pkt_num", | |
356 | HCLGE_MAC_STATS_FIELD_OFF(mac_rcv_send_app_good_pkt_num)}, | |
357 | {"mac_rcv_send_app_bad_pkt_num", | |
358 | HCLGE_MAC_STATS_FIELD_OFF(mac_rcv_send_app_bad_pkt_num)} | |
359 | }; | |
360 | ||
361 | static int hclge_64_bit_update_stats(struct hclge_dev *hdev) | |
362 | { | |
363 | #define HCLGE_64_BIT_CMD_NUM 5 | |
364 | #define HCLGE_64_BIT_RTN_DATANUM 4 | |
365 | u64 *data = (u64 *)(&hdev->hw_stats.all_64_bit_stats); | |
366 | struct hclge_desc desc[HCLGE_64_BIT_CMD_NUM]; | |
a90bb9a5 | 367 | __le64 *desc_data; |
46a3df9f S |
368 | int i, k, n; |
369 | int ret; | |
370 | ||
371 | hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_STATS_64_BIT, true); | |
372 | ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_64_BIT_CMD_NUM); | |
373 | if (ret) { | |
374 | dev_err(&hdev->pdev->dev, | |
375 | "Get 64 bit pkt stats fail, status = %d.\n", ret); | |
376 | return ret; | |
377 | } | |
378 | ||
379 | for (i = 0; i < HCLGE_64_BIT_CMD_NUM; i++) { | |
380 | if (unlikely(i == 0)) { | |
a90bb9a5 | 381 | desc_data = (__le64 *)(&desc[i].data[0]); |
46a3df9f S |
382 | n = HCLGE_64_BIT_RTN_DATANUM - 1; |
383 | } else { | |
a90bb9a5 | 384 | desc_data = (__le64 *)(&desc[i]); |
46a3df9f S |
385 | n = HCLGE_64_BIT_RTN_DATANUM; |
386 | } | |
387 | for (k = 0; k < n; k++) { | |
a90bb9a5 | 388 | *data++ += le64_to_cpu(*desc_data); |
46a3df9f S |
389 | desc_data++; |
390 | } | |
391 | } | |
392 | ||
393 | return 0; | |
394 | } | |
395 | ||
396 | static void hclge_reset_partial_32bit_counter(struct hclge_32_bit_stats *stats) | |
397 | { | |
398 | stats->pkt_curr_buf_cnt = 0; | |
399 | stats->pkt_curr_buf_tc0_cnt = 0; | |
400 | stats->pkt_curr_buf_tc1_cnt = 0; | |
401 | stats->pkt_curr_buf_tc2_cnt = 0; | |
402 | stats->pkt_curr_buf_tc3_cnt = 0; | |
403 | stats->pkt_curr_buf_tc4_cnt = 0; | |
404 | stats->pkt_curr_buf_tc5_cnt = 0; | |
405 | stats->pkt_curr_buf_tc6_cnt = 0; | |
406 | stats->pkt_curr_buf_tc7_cnt = 0; | |
407 | } | |
408 | ||
409 | static int hclge_32_bit_update_stats(struct hclge_dev *hdev) | |
410 | { | |
411 | #define HCLGE_32_BIT_CMD_NUM 8 | |
412 | #define HCLGE_32_BIT_RTN_DATANUM 8 | |
413 | ||
414 | struct hclge_desc desc[HCLGE_32_BIT_CMD_NUM]; | |
415 | struct hclge_32_bit_stats *all_32_bit_stats; | |
a90bb9a5 | 416 | __le32 *desc_data; |
46a3df9f S |
417 | int i, k, n; |
418 | u64 *data; | |
419 | int ret; | |
420 | ||
421 | all_32_bit_stats = &hdev->hw_stats.all_32_bit_stats; | |
422 | data = (u64 *)(&all_32_bit_stats->egu_tx_1588_pkt); | |
423 | ||
424 | hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_STATS_32_BIT, true); | |
425 | ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_32_BIT_CMD_NUM); | |
426 | if (ret) { | |
427 | dev_err(&hdev->pdev->dev, | |
428 | "Get 32 bit pkt stats fail, status = %d.\n", ret); | |
429 | ||
430 | return ret; | |
431 | } | |
432 | ||
433 | hclge_reset_partial_32bit_counter(all_32_bit_stats); | |
434 | for (i = 0; i < HCLGE_32_BIT_CMD_NUM; i++) { | |
435 | if (unlikely(i == 0)) { | |
a90bb9a5 YL |
436 | __le16 *desc_data_16bit; |
437 | ||
46a3df9f | 438 | all_32_bit_stats->igu_rx_err_pkt += |
a90bb9a5 YL |
439 | le32_to_cpu(desc[i].data[0]); |
440 | ||
441 | desc_data_16bit = (__le16 *)&desc[i].data[1]; | |
46a3df9f | 442 | all_32_bit_stats->igu_rx_no_eof_pkt += |
a90bb9a5 YL |
443 | le16_to_cpu(*desc_data_16bit); |
444 | ||
445 | desc_data_16bit++; | |
46a3df9f | 446 | all_32_bit_stats->igu_rx_no_sof_pkt += |
a90bb9a5 | 447 | le16_to_cpu(*desc_data_16bit); |
46a3df9f | 448 | |
a90bb9a5 | 449 | desc_data = &desc[i].data[2]; |
46a3df9f S |
450 | n = HCLGE_32_BIT_RTN_DATANUM - 4; |
451 | } else { | |
a90bb9a5 | 452 | desc_data = (__le32 *)&desc[i]; |
46a3df9f S |
453 | n = HCLGE_32_BIT_RTN_DATANUM; |
454 | } | |
455 | for (k = 0; k < n; k++) { | |
a90bb9a5 | 456 | *data++ += le32_to_cpu(*desc_data); |
46a3df9f S |
457 | desc_data++; |
458 | } | |
459 | } | |
460 | ||
461 | return 0; | |
462 | } | |
463 | ||
464 | static int hclge_mac_update_stats(struct hclge_dev *hdev) | |
465 | { | |
466 | #define HCLGE_MAC_CMD_NUM 17 | |
467 | #define HCLGE_RTN_DATA_NUM 4 | |
468 | ||
469 | u64 *data = (u64 *)(&hdev->hw_stats.mac_stats); | |
470 | struct hclge_desc desc[HCLGE_MAC_CMD_NUM]; | |
a90bb9a5 | 471 | __le64 *desc_data; |
46a3df9f S |
472 | int i, k, n; |
473 | int ret; | |
474 | ||
475 | hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_STATS_MAC, true); | |
476 | ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_MAC_CMD_NUM); | |
477 | if (ret) { | |
478 | dev_err(&hdev->pdev->dev, | |
479 | "Get MAC pkt stats fail, status = %d.\n", ret); | |
480 | ||
481 | return ret; | |
482 | } | |
483 | ||
484 | for (i = 0; i < HCLGE_MAC_CMD_NUM; i++) { | |
485 | if (unlikely(i == 0)) { | |
a90bb9a5 | 486 | desc_data = (__le64 *)(&desc[i].data[0]); |
46a3df9f S |
487 | n = HCLGE_RTN_DATA_NUM - 2; |
488 | } else { | |
a90bb9a5 | 489 | desc_data = (__le64 *)(&desc[i]); |
46a3df9f S |
490 | n = HCLGE_RTN_DATA_NUM; |
491 | } | |
492 | for (k = 0; k < n; k++) { | |
a90bb9a5 | 493 | *data++ += le64_to_cpu(*desc_data); |
46a3df9f S |
494 | desc_data++; |
495 | } | |
496 | } | |
497 | ||
498 | return 0; | |
499 | } | |
500 | ||
501 | static int hclge_tqps_update_stats(struct hnae3_handle *handle) | |
502 | { | |
503 | struct hnae3_knic_private_info *kinfo = &handle->kinfo; | |
504 | struct hclge_vport *vport = hclge_get_vport(handle); | |
505 | struct hclge_dev *hdev = vport->back; | |
506 | struct hnae3_queue *queue; | |
507 | struct hclge_desc desc[1]; | |
508 | struct hclge_tqp *tqp; | |
509 | int ret, i; | |
510 | ||
511 | for (i = 0; i < kinfo->num_tqps; i++) { | |
512 | queue = handle->kinfo.tqp[i]; | |
513 | tqp = container_of(queue, struct hclge_tqp, q); | |
514 | /* command : HCLGE_OPC_QUERY_IGU_STAT */ | |
515 | hclge_cmd_setup_basic_desc(&desc[0], | |
516 | HCLGE_OPC_QUERY_RX_STATUS, | |
517 | true); | |
518 | ||
a90bb9a5 | 519 | desc[0].data[0] = cpu_to_le32((tqp->index & 0x1ff)); |
46a3df9f S |
520 | ret = hclge_cmd_send(&hdev->hw, desc, 1); |
521 | if (ret) { | |
522 | dev_err(&hdev->pdev->dev, | |
523 | "Query tqp stat fail, status = %d,queue = %d\n", | |
524 | ret, i); | |
525 | return ret; | |
526 | } | |
527 | tqp->tqp_stats.rcb_rx_ring_pktnum_rcd += | |
a90bb9a5 | 528 | le32_to_cpu(desc[0].data[4]); |
46a3df9f S |
529 | } |
530 | ||
531 | for (i = 0; i < kinfo->num_tqps; i++) { | |
532 | queue = handle->kinfo.tqp[i]; | |
533 | tqp = container_of(queue, struct hclge_tqp, q); | |
534 | /* command : HCLGE_OPC_QUERY_IGU_STAT */ | |
535 | hclge_cmd_setup_basic_desc(&desc[0], | |
536 | HCLGE_OPC_QUERY_TX_STATUS, | |
537 | true); | |
538 | ||
a90bb9a5 | 539 | desc[0].data[0] = cpu_to_le32((tqp->index & 0x1ff)); |
46a3df9f S |
540 | ret = hclge_cmd_send(&hdev->hw, desc, 1); |
541 | if (ret) { | |
542 | dev_err(&hdev->pdev->dev, | |
543 | "Query tqp stat fail, status = %d,queue = %d\n", | |
544 | ret, i); | |
545 | return ret; | |
546 | } | |
547 | tqp->tqp_stats.rcb_tx_ring_pktnum_rcd += | |
a90bb9a5 | 548 | le32_to_cpu(desc[0].data[4]); |
46a3df9f S |
549 | } |
550 | ||
551 | return 0; | |
552 | } | |
553 | ||
554 | static u64 *hclge_tqps_get_stats(struct hnae3_handle *handle, u64 *data) | |
555 | { | |
556 | struct hnae3_knic_private_info *kinfo = &handle->kinfo; | |
557 | struct hclge_tqp *tqp; | |
558 | u64 *buff = data; | |
559 | int i; | |
560 | ||
561 | for (i = 0; i < kinfo->num_tqps; i++) { | |
562 | tqp = container_of(kinfo->tqp[i], struct hclge_tqp, q); | |
a90bb9a5 | 563 | *buff++ = tqp->tqp_stats.rcb_tx_ring_pktnum_rcd; |
46a3df9f S |
564 | } |
565 | ||
566 | for (i = 0; i < kinfo->num_tqps; i++) { | |
567 | tqp = container_of(kinfo->tqp[i], struct hclge_tqp, q); | |
a90bb9a5 | 568 | *buff++ = tqp->tqp_stats.rcb_rx_ring_pktnum_rcd; |
46a3df9f S |
569 | } |
570 | ||
571 | return buff; | |
572 | } | |
573 | ||
574 | static int hclge_tqps_get_sset_count(struct hnae3_handle *handle, int stringset) | |
575 | { | |
576 | struct hnae3_knic_private_info *kinfo = &handle->kinfo; | |
577 | ||
578 | return kinfo->num_tqps * (2); | |
579 | } | |
580 | ||
581 | static u8 *hclge_tqps_get_strings(struct hnae3_handle *handle, u8 *data) | |
582 | { | |
583 | struct hnae3_knic_private_info *kinfo = &handle->kinfo; | |
584 | u8 *buff = data; | |
585 | int i = 0; | |
586 | ||
587 | for (i = 0; i < kinfo->num_tqps; i++) { | |
588 | struct hclge_tqp *tqp = container_of(handle->kinfo.tqp[i], | |
589 | struct hclge_tqp, q); | |
590 | snprintf(buff, ETH_GSTRING_LEN, "rcb_q%d_tx_pktnum_rcd", | |
591 | tqp->index); | |
592 | buff = buff + ETH_GSTRING_LEN; | |
593 | } | |
594 | ||
595 | for (i = 0; i < kinfo->num_tqps; i++) { | |
596 | struct hclge_tqp *tqp = container_of(kinfo->tqp[i], | |
597 | struct hclge_tqp, q); | |
598 | snprintf(buff, ETH_GSTRING_LEN, "rcb_q%d_rx_pktnum_rcd", | |
599 | tqp->index); | |
600 | buff = buff + ETH_GSTRING_LEN; | |
601 | } | |
602 | ||
603 | return buff; | |
604 | } | |
605 | ||
606 | static u64 *hclge_comm_get_stats(void *comm_stats, | |
607 | const struct hclge_comm_stats_str strs[], | |
608 | int size, u64 *data) | |
609 | { | |
610 | u64 *buf = data; | |
611 | u32 i; | |
612 | ||
613 | for (i = 0; i < size; i++) | |
614 | buf[i] = HCLGE_STATS_READ(comm_stats, strs[i].offset); | |
615 | ||
616 | return buf + size; | |
617 | } | |
618 | ||
619 | static u8 *hclge_comm_get_strings(u32 stringset, | |
620 | const struct hclge_comm_stats_str strs[], | |
621 | int size, u8 *data) | |
622 | { | |
623 | char *buff = (char *)data; | |
624 | u32 i; | |
625 | ||
626 | if (stringset != ETH_SS_STATS) | |
627 | return buff; | |
628 | ||
629 | for (i = 0; i < size; i++) { | |
630 | snprintf(buff, ETH_GSTRING_LEN, | |
631 | strs[i].desc); | |
632 | buff = buff + ETH_GSTRING_LEN; | |
633 | } | |
634 | ||
635 | return (u8 *)buff; | |
636 | } | |
637 | ||
638 | static void hclge_update_netstat(struct hclge_hw_stats *hw_stats, | |
639 | struct net_device_stats *net_stats) | |
640 | { | |
641 | net_stats->tx_dropped = 0; | |
642 | net_stats->rx_dropped = hw_stats->all_32_bit_stats.ssu_full_drop_num; | |
643 | net_stats->rx_dropped += hw_stats->all_32_bit_stats.ppp_key_drop_num; | |
644 | net_stats->rx_dropped += hw_stats->all_32_bit_stats.ssu_key_drop_num; | |
645 | ||
646 | net_stats->rx_errors = hw_stats->mac_stats.mac_rx_overrsize_pkt_num; | |
647 | net_stats->rx_errors += hw_stats->mac_stats.mac_rx_undersize_pkt_num; | |
648 | net_stats->rx_errors += hw_stats->all_32_bit_stats.igu_rx_err_pkt; | |
649 | net_stats->rx_errors += hw_stats->all_32_bit_stats.igu_rx_no_eof_pkt; | |
650 | net_stats->rx_errors += hw_stats->all_32_bit_stats.igu_rx_no_sof_pkt; | |
651 | net_stats->rx_errors += hw_stats->mac_stats.mac_rcv_fcs_err_pkt_num; | |
652 | ||
653 | net_stats->multicast = hw_stats->mac_stats.mac_tx_multi_pkt_num; | |
654 | net_stats->multicast += hw_stats->mac_stats.mac_rx_multi_pkt_num; | |
655 | ||
656 | net_stats->rx_crc_errors = hw_stats->mac_stats.mac_rcv_fcs_err_pkt_num; | |
657 | net_stats->rx_length_errors = | |
658 | hw_stats->mac_stats.mac_rx_undersize_pkt_num; | |
659 | net_stats->rx_length_errors += | |
660 | hw_stats->mac_stats.mac_rx_overrsize_pkt_num; | |
661 | net_stats->rx_over_errors = | |
662 | hw_stats->mac_stats.mac_rx_overrsize_pkt_num; | |
663 | } | |
664 | ||
665 | static void hclge_update_stats_for_all(struct hclge_dev *hdev) | |
666 | { | |
667 | struct hnae3_handle *handle; | |
668 | int status; | |
669 | ||
670 | handle = &hdev->vport[0].nic; | |
671 | if (handle->client) { | |
672 | status = hclge_tqps_update_stats(handle); | |
673 | if (status) { | |
674 | dev_err(&hdev->pdev->dev, | |
675 | "Update TQPS stats fail, status = %d.\n", | |
676 | status); | |
677 | } | |
678 | } | |
679 | ||
680 | status = hclge_mac_update_stats(hdev); | |
681 | if (status) | |
682 | dev_err(&hdev->pdev->dev, | |
683 | "Update MAC stats fail, status = %d.\n", status); | |
684 | ||
685 | status = hclge_32_bit_update_stats(hdev); | |
686 | if (status) | |
687 | dev_err(&hdev->pdev->dev, | |
688 | "Update 32 bit stats fail, status = %d.\n", | |
689 | status); | |
690 | ||
691 | hclge_update_netstat(&hdev->hw_stats, &handle->kinfo.netdev->stats); | |
692 | } | |
693 | ||
694 | static void hclge_update_stats(struct hnae3_handle *handle, | |
695 | struct net_device_stats *net_stats) | |
696 | { | |
697 | struct hclge_vport *vport = hclge_get_vport(handle); | |
698 | struct hclge_dev *hdev = vport->back; | |
699 | struct hclge_hw_stats *hw_stats = &hdev->hw_stats; | |
700 | int status; | |
701 | ||
702 | status = hclge_mac_update_stats(hdev); | |
703 | if (status) | |
704 | dev_err(&hdev->pdev->dev, | |
705 | "Update MAC stats fail, status = %d.\n", | |
706 | status); | |
707 | ||
708 | status = hclge_32_bit_update_stats(hdev); | |
709 | if (status) | |
710 | dev_err(&hdev->pdev->dev, | |
711 | "Update 32 bit stats fail, status = %d.\n", | |
712 | status); | |
713 | ||
714 | status = hclge_64_bit_update_stats(hdev); | |
715 | if (status) | |
716 | dev_err(&hdev->pdev->dev, | |
717 | "Update 64 bit stats fail, status = %d.\n", | |
718 | status); | |
719 | ||
720 | status = hclge_tqps_update_stats(handle); | |
721 | if (status) | |
722 | dev_err(&hdev->pdev->dev, | |
723 | "Update TQPS stats fail, status = %d.\n", | |
724 | status); | |
725 | ||
726 | hclge_update_netstat(hw_stats, net_stats); | |
727 | } | |
728 | ||
729 | static int hclge_get_sset_count(struct hnae3_handle *handle, int stringset) | |
730 | { | |
731 | #define HCLGE_LOOPBACK_TEST_FLAGS 0x7 | |
732 | ||
733 | struct hclge_vport *vport = hclge_get_vport(handle); | |
734 | struct hclge_dev *hdev = vport->back; | |
735 | int count = 0; | |
736 | ||
737 | /* Loopback test support rules: | |
738 | * mac: only GE mode support | |
739 | * serdes: all mac mode will support include GE/XGE/LGE/CGE | |
740 | * phy: only support when phy device exist on board | |
741 | */ | |
742 | if (stringset == ETH_SS_TEST) { | |
743 | /* clear loopback bit flags at first */ | |
744 | handle->flags = (handle->flags & (~HCLGE_LOOPBACK_TEST_FLAGS)); | |
745 | if (hdev->hw.mac.speed == HCLGE_MAC_SPEED_10M || | |
746 | hdev->hw.mac.speed == HCLGE_MAC_SPEED_100M || | |
747 | hdev->hw.mac.speed == HCLGE_MAC_SPEED_1G) { | |
748 | count += 1; | |
749 | handle->flags |= HNAE3_SUPPORT_MAC_LOOPBACK; | |
750 | } else { | |
751 | count = -EOPNOTSUPP; | |
752 | } | |
753 | } else if (stringset == ETH_SS_STATS) { | |
754 | count = ARRAY_SIZE(g_mac_stats_string) + | |
755 | ARRAY_SIZE(g_all_32bit_stats_string) + | |
756 | ARRAY_SIZE(g_all_64bit_stats_string) + | |
757 | hclge_tqps_get_sset_count(handle, stringset); | |
758 | } | |
759 | ||
760 | return count; | |
761 | } | |
762 | ||
763 | static void hclge_get_strings(struct hnae3_handle *handle, | |
764 | u32 stringset, | |
765 | u8 *data) | |
766 | { | |
767 | u8 *p = (char *)data; | |
768 | int size; | |
769 | ||
770 | if (stringset == ETH_SS_STATS) { | |
771 | size = ARRAY_SIZE(g_mac_stats_string); | |
772 | p = hclge_comm_get_strings(stringset, | |
773 | g_mac_stats_string, | |
774 | size, | |
775 | p); | |
776 | size = ARRAY_SIZE(g_all_32bit_stats_string); | |
777 | p = hclge_comm_get_strings(stringset, | |
778 | g_all_32bit_stats_string, | |
779 | size, | |
780 | p); | |
781 | size = ARRAY_SIZE(g_all_64bit_stats_string); | |
782 | p = hclge_comm_get_strings(stringset, | |
783 | g_all_64bit_stats_string, | |
784 | size, | |
785 | p); | |
786 | p = hclge_tqps_get_strings(handle, p); | |
787 | } else if (stringset == ETH_SS_TEST) { | |
788 | if (handle->flags & HNAE3_SUPPORT_MAC_LOOPBACK) { | |
789 | memcpy(p, | |
790 | hns3_nic_test_strs[HNAE3_MAC_INTER_LOOP_MAC], | |
791 | ETH_GSTRING_LEN); | |
792 | p += ETH_GSTRING_LEN; | |
793 | } | |
794 | if (handle->flags & HNAE3_SUPPORT_SERDES_LOOPBACK) { | |
795 | memcpy(p, | |
796 | hns3_nic_test_strs[HNAE3_MAC_INTER_LOOP_SERDES], | |
797 | ETH_GSTRING_LEN); | |
798 | p += ETH_GSTRING_LEN; | |
799 | } | |
800 | if (handle->flags & HNAE3_SUPPORT_PHY_LOOPBACK) { | |
801 | memcpy(p, | |
802 | hns3_nic_test_strs[HNAE3_MAC_INTER_LOOP_PHY], | |
803 | ETH_GSTRING_LEN); | |
804 | p += ETH_GSTRING_LEN; | |
805 | } | |
806 | } | |
807 | } | |
808 | ||
809 | static void hclge_get_stats(struct hnae3_handle *handle, u64 *data) | |
810 | { | |
811 | struct hclge_vport *vport = hclge_get_vport(handle); | |
812 | struct hclge_dev *hdev = vport->back; | |
813 | u64 *p; | |
814 | ||
815 | p = hclge_comm_get_stats(&hdev->hw_stats.mac_stats, | |
816 | g_mac_stats_string, | |
817 | ARRAY_SIZE(g_mac_stats_string), | |
818 | data); | |
819 | p = hclge_comm_get_stats(&hdev->hw_stats.all_32_bit_stats, | |
820 | g_all_32bit_stats_string, | |
821 | ARRAY_SIZE(g_all_32bit_stats_string), | |
822 | p); | |
823 | p = hclge_comm_get_stats(&hdev->hw_stats.all_64_bit_stats, | |
824 | g_all_64bit_stats_string, | |
825 | ARRAY_SIZE(g_all_64bit_stats_string), | |
826 | p); | |
827 | p = hclge_tqps_get_stats(handle, p); | |
828 | } | |
829 | ||
830 | static int hclge_parse_func_status(struct hclge_dev *hdev, | |
d44f9b63 | 831 | struct hclge_func_status_cmd *status) |
46a3df9f S |
832 | { |
833 | if (!(status->pf_state & HCLGE_PF_STATE_DONE)) | |
834 | return -EINVAL; | |
835 | ||
836 | /* Set the pf to main pf */ | |
837 | if (status->pf_state & HCLGE_PF_STATE_MAIN) | |
838 | hdev->flag |= HCLGE_FLAG_MAIN; | |
839 | else | |
840 | hdev->flag &= ~HCLGE_FLAG_MAIN; | |
841 | ||
46a3df9f S |
842 | return 0; |
843 | } | |
844 | ||
845 | static int hclge_query_function_status(struct hclge_dev *hdev) | |
846 | { | |
d44f9b63 | 847 | struct hclge_func_status_cmd *req; |
46a3df9f S |
848 | struct hclge_desc desc; |
849 | int timeout = 0; | |
850 | int ret; | |
851 | ||
852 | hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_FUNC_STATUS, true); | |
d44f9b63 | 853 | req = (struct hclge_func_status_cmd *)desc.data; |
46a3df9f S |
854 | |
855 | do { | |
856 | ret = hclge_cmd_send(&hdev->hw, &desc, 1); | |
857 | if (ret) { | |
858 | dev_err(&hdev->pdev->dev, | |
859 | "query function status failed %d.\n", | |
860 | ret); | |
861 | ||
862 | return ret; | |
863 | } | |
864 | ||
865 | /* Check pf reset is done */ | |
866 | if (req->pf_state) | |
867 | break; | |
868 | usleep_range(1000, 2000); | |
869 | } while (timeout++ < 5); | |
870 | ||
871 | ret = hclge_parse_func_status(hdev, req); | |
872 | ||
873 | return ret; | |
874 | } | |
875 | ||
876 | static int hclge_query_pf_resource(struct hclge_dev *hdev) | |
877 | { | |
d44f9b63 | 878 | struct hclge_pf_res_cmd *req; |
46a3df9f S |
879 | struct hclge_desc desc; |
880 | int ret; | |
881 | ||
882 | hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_PF_RSRC, true); | |
883 | ret = hclge_cmd_send(&hdev->hw, &desc, 1); | |
884 | if (ret) { | |
885 | dev_err(&hdev->pdev->dev, | |
886 | "query pf resource failed %d.\n", ret); | |
887 | return ret; | |
888 | } | |
889 | ||
d44f9b63 | 890 | req = (struct hclge_pf_res_cmd *)desc.data; |
46a3df9f S |
891 | hdev->num_tqps = __le16_to_cpu(req->tqp_num); |
892 | hdev->pkt_buf_size = __le16_to_cpu(req->buf_size) << HCLGE_BUF_UNIT_S; | |
893 | ||
e92a0843 | 894 | if (hnae3_dev_roce_supported(hdev)) { |
887c3820 | 895 | hdev->num_roce_msi = |
46a3df9f S |
896 | hnae_get_field(__le16_to_cpu(req->pf_intr_vector_number), |
897 | HCLGE_PF_VEC_NUM_M, HCLGE_PF_VEC_NUM_S); | |
898 | ||
899 | /* PF should have NIC vectors and Roce vectors, | |
900 | * NIC vectors are queued before Roce vectors. | |
901 | */ | |
887c3820 | 902 | hdev->num_msi = hdev->num_roce_msi + HCLGE_ROCE_VECTOR_OFFSET; |
46a3df9f S |
903 | } else { |
904 | hdev->num_msi = | |
905 | hnae_get_field(__le16_to_cpu(req->pf_intr_vector_number), | |
906 | HCLGE_PF_VEC_NUM_M, HCLGE_PF_VEC_NUM_S); | |
907 | } | |
908 | ||
909 | return 0; | |
910 | } | |
911 | ||
912 | static int hclge_parse_speed(int speed_cmd, int *speed) | |
913 | { | |
914 | switch (speed_cmd) { | |
915 | case 6: | |
916 | *speed = HCLGE_MAC_SPEED_10M; | |
917 | break; | |
918 | case 7: | |
919 | *speed = HCLGE_MAC_SPEED_100M; | |
920 | break; | |
921 | case 0: | |
922 | *speed = HCLGE_MAC_SPEED_1G; | |
923 | break; | |
924 | case 1: | |
925 | *speed = HCLGE_MAC_SPEED_10G; | |
926 | break; | |
927 | case 2: | |
928 | *speed = HCLGE_MAC_SPEED_25G; | |
929 | break; | |
930 | case 3: | |
931 | *speed = HCLGE_MAC_SPEED_40G; | |
932 | break; | |
933 | case 4: | |
934 | *speed = HCLGE_MAC_SPEED_50G; | |
935 | break; | |
936 | case 5: | |
937 | *speed = HCLGE_MAC_SPEED_100G; | |
938 | break; | |
939 | default: | |
940 | return -EINVAL; | |
941 | } | |
942 | ||
943 | return 0; | |
944 | } | |
945 | ||
946 | static void hclge_parse_cfg(struct hclge_cfg *cfg, struct hclge_desc *desc) | |
947 | { | |
d44f9b63 | 948 | struct hclge_cfg_param_cmd *req; |
46a3df9f S |
949 | u64 mac_addr_tmp_high; |
950 | u64 mac_addr_tmp; | |
951 | int i; | |
952 | ||
d44f9b63 | 953 | req = (struct hclge_cfg_param_cmd *)desc[0].data; |
46a3df9f S |
954 | |
955 | /* get the configuration */ | |
956 | cfg->vmdq_vport_num = hnae_get_field(__le32_to_cpu(req->param[0]), | |
957 | HCLGE_CFG_VMDQ_M, | |
958 | HCLGE_CFG_VMDQ_S); | |
959 | cfg->tc_num = hnae_get_field(__le32_to_cpu(req->param[0]), | |
960 | HCLGE_CFG_TC_NUM_M, HCLGE_CFG_TC_NUM_S); | |
961 | cfg->tqp_desc_num = hnae_get_field(__le32_to_cpu(req->param[0]), | |
962 | HCLGE_CFG_TQP_DESC_N_M, | |
963 | HCLGE_CFG_TQP_DESC_N_S); | |
964 | ||
965 | cfg->phy_addr = hnae_get_field(__le32_to_cpu(req->param[1]), | |
966 | HCLGE_CFG_PHY_ADDR_M, | |
967 | HCLGE_CFG_PHY_ADDR_S); | |
968 | cfg->media_type = hnae_get_field(__le32_to_cpu(req->param[1]), | |
969 | HCLGE_CFG_MEDIA_TP_M, | |
970 | HCLGE_CFG_MEDIA_TP_S); | |
971 | cfg->rx_buf_len = hnae_get_field(__le32_to_cpu(req->param[1]), | |
972 | HCLGE_CFG_RX_BUF_LEN_M, | |
973 | HCLGE_CFG_RX_BUF_LEN_S); | |
974 | /* get mac_address */ | |
975 | mac_addr_tmp = __le32_to_cpu(req->param[2]); | |
976 | mac_addr_tmp_high = hnae_get_field(__le32_to_cpu(req->param[3]), | |
977 | HCLGE_CFG_MAC_ADDR_H_M, | |
978 | HCLGE_CFG_MAC_ADDR_H_S); | |
979 | ||
980 | mac_addr_tmp |= (mac_addr_tmp_high << 31) << 1; | |
981 | ||
982 | cfg->default_speed = hnae_get_field(__le32_to_cpu(req->param[3]), | |
983 | HCLGE_CFG_DEFAULT_SPEED_M, | |
984 | HCLGE_CFG_DEFAULT_SPEED_S); | |
c408e202 PL |
985 | cfg->rss_size_max = hnae_get_field(__le32_to_cpu(req->param[3]), |
986 | HCLGE_CFG_RSS_SIZE_M, | |
987 | HCLGE_CFG_RSS_SIZE_S); | |
988 | ||
46a3df9f S |
989 | for (i = 0; i < ETH_ALEN; i++) |
990 | cfg->mac_addr[i] = (mac_addr_tmp >> (8 * i)) & 0xff; | |
991 | ||
d44f9b63 | 992 | req = (struct hclge_cfg_param_cmd *)desc[1].data; |
46a3df9f S |
993 | cfg->numa_node_map = __le32_to_cpu(req->param[0]); |
994 | } | |
995 | ||
996 | /* hclge_get_cfg: query the static parameter from flash | |
997 | * @hdev: pointer to struct hclge_dev | |
998 | * @hcfg: the config structure to be getted | |
999 | */ | |
1000 | static int hclge_get_cfg(struct hclge_dev *hdev, struct hclge_cfg *hcfg) | |
1001 | { | |
1002 | struct hclge_desc desc[HCLGE_PF_CFG_DESC_NUM]; | |
d44f9b63 | 1003 | struct hclge_cfg_param_cmd *req; |
46a3df9f S |
1004 | int i, ret; |
1005 | ||
1006 | for (i = 0; i < HCLGE_PF_CFG_DESC_NUM; i++) { | |
a90bb9a5 YL |
1007 | u32 offset = 0; |
1008 | ||
d44f9b63 | 1009 | req = (struct hclge_cfg_param_cmd *)desc[i].data; |
46a3df9f S |
1010 | hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_GET_CFG_PARAM, |
1011 | true); | |
a90bb9a5 | 1012 | hnae_set_field(offset, HCLGE_CFG_OFFSET_M, |
46a3df9f S |
1013 | HCLGE_CFG_OFFSET_S, i * HCLGE_CFG_RD_LEN_BYTES); |
1014 | /* Len should be united by 4 bytes when send to hardware */ | |
a90bb9a5 | 1015 | hnae_set_field(offset, HCLGE_CFG_RD_LEN_M, HCLGE_CFG_RD_LEN_S, |
46a3df9f | 1016 | HCLGE_CFG_RD_LEN_BYTES / HCLGE_CFG_RD_LEN_UNIT); |
a90bb9a5 | 1017 | req->offset = cpu_to_le32(offset); |
46a3df9f S |
1018 | } |
1019 | ||
1020 | ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_PF_CFG_DESC_NUM); | |
1021 | if (ret) { | |
1022 | dev_err(&hdev->pdev->dev, | |
1023 | "get config failed %d.\n", ret); | |
1024 | return ret; | |
1025 | } | |
1026 | ||
1027 | hclge_parse_cfg(hcfg, desc); | |
1028 | return 0; | |
1029 | } | |
1030 | ||
1031 | static int hclge_get_cap(struct hclge_dev *hdev) | |
1032 | { | |
1033 | int ret; | |
1034 | ||
1035 | ret = hclge_query_function_status(hdev); | |
1036 | if (ret) { | |
1037 | dev_err(&hdev->pdev->dev, | |
1038 | "query function status error %d.\n", ret); | |
1039 | return ret; | |
1040 | } | |
1041 | ||
1042 | /* get pf resource */ | |
1043 | ret = hclge_query_pf_resource(hdev); | |
1044 | if (ret) { | |
1045 | dev_err(&hdev->pdev->dev, | |
1046 | "query pf resource error %d.\n", ret); | |
1047 | return ret; | |
1048 | } | |
1049 | ||
1050 | return 0; | |
1051 | } | |
1052 | ||
1053 | static int hclge_configure(struct hclge_dev *hdev) | |
1054 | { | |
1055 | struct hclge_cfg cfg; | |
1056 | int ret, i; | |
1057 | ||
1058 | ret = hclge_get_cfg(hdev, &cfg); | |
1059 | if (ret) { | |
1060 | dev_err(&hdev->pdev->dev, "get mac mode error %d.\n", ret); | |
1061 | return ret; | |
1062 | } | |
1063 | ||
1064 | hdev->num_vmdq_vport = cfg.vmdq_vport_num; | |
1065 | hdev->base_tqp_pid = 0; | |
c408e202 | 1066 | hdev->rss_size_max = cfg.rss_size_max; |
46a3df9f | 1067 | hdev->rx_buf_len = cfg.rx_buf_len; |
fbbb1536 | 1068 | ether_addr_copy(hdev->hw.mac.mac_addr, cfg.mac_addr); |
46a3df9f | 1069 | hdev->hw.mac.media_type = cfg.media_type; |
2a4776e1 | 1070 | hdev->hw.mac.phy_addr = cfg.phy_addr; |
46a3df9f S |
1071 | hdev->num_desc = cfg.tqp_desc_num; |
1072 | hdev->tm_info.num_pg = 1; | |
cacde272 | 1073 | hdev->tc_max = cfg.tc_num; |
46a3df9f S |
1074 | hdev->tm_info.hw_pfc_map = 0; |
1075 | ||
1076 | ret = hclge_parse_speed(cfg.default_speed, &hdev->hw.mac.speed); | |
1077 | if (ret) { | |
1078 | dev_err(&hdev->pdev->dev, "Get wrong speed ret=%d.\n", ret); | |
1079 | return ret; | |
1080 | } | |
1081 | ||
cacde272 YL |
1082 | if ((hdev->tc_max > HNAE3_MAX_TC) || |
1083 | (hdev->tc_max < 1)) { | |
46a3df9f | 1084 | dev_warn(&hdev->pdev->dev, "TC num = %d.\n", |
cacde272 YL |
1085 | hdev->tc_max); |
1086 | hdev->tc_max = 1; | |
46a3df9f S |
1087 | } |
1088 | ||
cacde272 YL |
1089 | /* Dev does not support DCB */ |
1090 | if (!hnae3_dev_dcb_supported(hdev)) { | |
1091 | hdev->tc_max = 1; | |
1092 | hdev->pfc_max = 0; | |
1093 | } else { | |
1094 | hdev->pfc_max = hdev->tc_max; | |
1095 | } | |
1096 | ||
1097 | hdev->tm_info.num_tc = hdev->tc_max; | |
1098 | ||
46a3df9f | 1099 | /* Currently not support uncontiuous tc */ |
cacde272 | 1100 | for (i = 0; i < hdev->tm_info.num_tc; i++) |
46a3df9f S |
1101 | hnae_set_bit(hdev->hw_tc_map, i, 1); |
1102 | ||
1103 | if (!hdev->num_vmdq_vport && !hdev->num_req_vfs) | |
1104 | hdev->tx_sch_mode = HCLGE_FLAG_TC_BASE_SCH_MODE; | |
1105 | else | |
1106 | hdev->tx_sch_mode = HCLGE_FLAG_VNET_BASE_SCH_MODE; | |
1107 | ||
1108 | return ret; | |
1109 | } | |
1110 | ||
1111 | static int hclge_config_tso(struct hclge_dev *hdev, int tso_mss_min, | |
1112 | int tso_mss_max) | |
1113 | { | |
d44f9b63 | 1114 | struct hclge_cfg_tso_status_cmd *req; |
46a3df9f | 1115 | struct hclge_desc desc; |
a90bb9a5 | 1116 | u16 tso_mss; |
46a3df9f S |
1117 | |
1118 | hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TSO_GENERIC_CONFIG, false); | |
1119 | ||
d44f9b63 | 1120 | req = (struct hclge_cfg_tso_status_cmd *)desc.data; |
a90bb9a5 YL |
1121 | |
1122 | tso_mss = 0; | |
1123 | hnae_set_field(tso_mss, HCLGE_TSO_MSS_MIN_M, | |
46a3df9f | 1124 | HCLGE_TSO_MSS_MIN_S, tso_mss_min); |
a90bb9a5 YL |
1125 | req->tso_mss_min = cpu_to_le16(tso_mss); |
1126 | ||
1127 | tso_mss = 0; | |
1128 | hnae_set_field(tso_mss, HCLGE_TSO_MSS_MIN_M, | |
46a3df9f | 1129 | HCLGE_TSO_MSS_MIN_S, tso_mss_max); |
a90bb9a5 | 1130 | req->tso_mss_max = cpu_to_le16(tso_mss); |
46a3df9f S |
1131 | |
1132 | return hclge_cmd_send(&hdev->hw, &desc, 1); | |
1133 | } | |
1134 | ||
1135 | static int hclge_alloc_tqps(struct hclge_dev *hdev) | |
1136 | { | |
1137 | struct hclge_tqp *tqp; | |
1138 | int i; | |
1139 | ||
1140 | hdev->htqp = devm_kcalloc(&hdev->pdev->dev, hdev->num_tqps, | |
1141 | sizeof(struct hclge_tqp), GFP_KERNEL); | |
1142 | if (!hdev->htqp) | |
1143 | return -ENOMEM; | |
1144 | ||
1145 | tqp = hdev->htqp; | |
1146 | ||
1147 | for (i = 0; i < hdev->num_tqps; i++) { | |
1148 | tqp->dev = &hdev->pdev->dev; | |
1149 | tqp->index = i; | |
1150 | ||
1151 | tqp->q.ae_algo = &ae_algo; | |
1152 | tqp->q.buf_size = hdev->rx_buf_len; | |
1153 | tqp->q.desc_num = hdev->num_desc; | |
1154 | tqp->q.io_base = hdev->hw.io_base + HCLGE_TQP_REG_OFFSET + | |
1155 | i * HCLGE_TQP_REG_SIZE; | |
1156 | ||
1157 | tqp++; | |
1158 | } | |
1159 | ||
1160 | return 0; | |
1161 | } | |
1162 | ||
1163 | static int hclge_map_tqps_to_func(struct hclge_dev *hdev, u16 func_id, | |
1164 | u16 tqp_pid, u16 tqp_vid, bool is_pf) | |
1165 | { | |
d44f9b63 | 1166 | struct hclge_tqp_map_cmd *req; |
46a3df9f S |
1167 | struct hclge_desc desc; |
1168 | int ret; | |
1169 | ||
1170 | hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SET_TQP_MAP, false); | |
1171 | ||
d44f9b63 | 1172 | req = (struct hclge_tqp_map_cmd *)desc.data; |
46a3df9f | 1173 | req->tqp_id = cpu_to_le16(tqp_pid); |
a90bb9a5 | 1174 | req->tqp_vf = func_id; |
46a3df9f S |
1175 | req->tqp_flag = !is_pf << HCLGE_TQP_MAP_TYPE_B | |
1176 | 1 << HCLGE_TQP_MAP_EN_B; | |
1177 | req->tqp_vid = cpu_to_le16(tqp_vid); | |
1178 | ||
1179 | ret = hclge_cmd_send(&hdev->hw, &desc, 1); | |
1180 | if (ret) { | |
1181 | dev_err(&hdev->pdev->dev, "TQP map failed %d.\n", | |
1182 | ret); | |
1183 | return ret; | |
1184 | } | |
1185 | ||
1186 | return 0; | |
1187 | } | |
1188 | ||
1189 | static int hclge_assign_tqp(struct hclge_vport *vport, | |
1190 | struct hnae3_queue **tqp, u16 num_tqps) | |
1191 | { | |
1192 | struct hclge_dev *hdev = vport->back; | |
7df7dad6 | 1193 | int i, alloced; |
46a3df9f S |
1194 | |
1195 | for (i = 0, alloced = 0; i < hdev->num_tqps && | |
1196 | alloced < num_tqps; i++) { | |
1197 | if (!hdev->htqp[i].alloced) { | |
1198 | hdev->htqp[i].q.handle = &vport->nic; | |
1199 | hdev->htqp[i].q.tqp_index = alloced; | |
1200 | tqp[alloced] = &hdev->htqp[i].q; | |
1201 | hdev->htqp[i].alloced = true; | |
46a3df9f S |
1202 | alloced++; |
1203 | } | |
1204 | } | |
1205 | vport->alloc_tqps = num_tqps; | |
1206 | ||
1207 | return 0; | |
1208 | } | |
1209 | ||
1210 | static int hclge_knic_setup(struct hclge_vport *vport, u16 num_tqps) | |
1211 | { | |
1212 | struct hnae3_handle *nic = &vport->nic; | |
1213 | struct hnae3_knic_private_info *kinfo = &nic->kinfo; | |
1214 | struct hclge_dev *hdev = vport->back; | |
1215 | int i, ret; | |
1216 | ||
1217 | kinfo->num_desc = hdev->num_desc; | |
1218 | kinfo->rx_buf_len = hdev->rx_buf_len; | |
1219 | kinfo->num_tc = min_t(u16, num_tqps, hdev->tm_info.num_tc); | |
1220 | kinfo->rss_size | |
1221 | = min_t(u16, hdev->rss_size_max, num_tqps / kinfo->num_tc); | |
1222 | kinfo->num_tqps = kinfo->rss_size * kinfo->num_tc; | |
1223 | ||
1224 | for (i = 0; i < HNAE3_MAX_TC; i++) { | |
1225 | if (hdev->hw_tc_map & BIT(i)) { | |
1226 | kinfo->tc_info[i].enable = true; | |
1227 | kinfo->tc_info[i].tqp_offset = i * kinfo->rss_size; | |
1228 | kinfo->tc_info[i].tqp_count = kinfo->rss_size; | |
1229 | kinfo->tc_info[i].tc = i; | |
1230 | } else { | |
1231 | /* Set to default queue if TC is disable */ | |
1232 | kinfo->tc_info[i].enable = false; | |
1233 | kinfo->tc_info[i].tqp_offset = 0; | |
1234 | kinfo->tc_info[i].tqp_count = 1; | |
1235 | kinfo->tc_info[i].tc = 0; | |
1236 | } | |
1237 | } | |
1238 | ||
1239 | kinfo->tqp = devm_kcalloc(&hdev->pdev->dev, kinfo->num_tqps, | |
1240 | sizeof(struct hnae3_queue *), GFP_KERNEL); | |
1241 | if (!kinfo->tqp) | |
1242 | return -ENOMEM; | |
1243 | ||
1244 | ret = hclge_assign_tqp(vport, kinfo->tqp, kinfo->num_tqps); | |
1245 | if (ret) { | |
1246 | dev_err(&hdev->pdev->dev, "fail to assign TQPs %d.\n", ret); | |
1247 | return -EINVAL; | |
1248 | } | |
1249 | ||
1250 | return 0; | |
1251 | } | |
1252 | ||
7df7dad6 L |
1253 | static int hclge_map_tqp_to_vport(struct hclge_dev *hdev, |
1254 | struct hclge_vport *vport) | |
1255 | { | |
1256 | struct hnae3_handle *nic = &vport->nic; | |
1257 | struct hnae3_knic_private_info *kinfo; | |
1258 | u16 i; | |
1259 | ||
1260 | kinfo = &nic->kinfo; | |
1261 | for (i = 0; i < kinfo->num_tqps; i++) { | |
1262 | struct hclge_tqp *q = | |
1263 | container_of(kinfo->tqp[i], struct hclge_tqp, q); | |
1264 | bool is_pf; | |
1265 | int ret; | |
1266 | ||
1267 | is_pf = !(vport->vport_id); | |
1268 | ret = hclge_map_tqps_to_func(hdev, vport->vport_id, q->index, | |
1269 | i, is_pf); | |
1270 | if (ret) | |
1271 | return ret; | |
1272 | } | |
1273 | ||
1274 | return 0; | |
1275 | } | |
1276 | ||
1277 | static int hclge_map_tqp(struct hclge_dev *hdev) | |
1278 | { | |
1279 | struct hclge_vport *vport = hdev->vport; | |
1280 | u16 i, num_vport; | |
1281 | ||
1282 | num_vport = hdev->num_vmdq_vport + hdev->num_req_vfs + 1; | |
1283 | for (i = 0; i < num_vport; i++) { | |
1284 | int ret; | |
1285 | ||
1286 | ret = hclge_map_tqp_to_vport(hdev, vport); | |
1287 | if (ret) | |
1288 | return ret; | |
1289 | ||
1290 | vport++; | |
1291 | } | |
1292 | ||
1293 | return 0; | |
1294 | } | |
1295 | ||
46a3df9f S |
1296 | static void hclge_unic_setup(struct hclge_vport *vport, u16 num_tqps) |
1297 | { | |
1298 | /* this would be initialized later */ | |
1299 | } | |
1300 | ||
1301 | static int hclge_vport_setup(struct hclge_vport *vport, u16 num_tqps) | |
1302 | { | |
1303 | struct hnae3_handle *nic = &vport->nic; | |
1304 | struct hclge_dev *hdev = vport->back; | |
1305 | int ret; | |
1306 | ||
1307 | nic->pdev = hdev->pdev; | |
1308 | nic->ae_algo = &ae_algo; | |
1309 | nic->numa_node_mask = hdev->numa_node_mask; | |
1310 | ||
1311 | if (hdev->ae_dev->dev_type == HNAE3_DEV_KNIC) { | |
1312 | ret = hclge_knic_setup(vport, num_tqps); | |
1313 | if (ret) { | |
1314 | dev_err(&hdev->pdev->dev, "knic setup failed %d\n", | |
1315 | ret); | |
1316 | return ret; | |
1317 | } | |
1318 | } else { | |
1319 | hclge_unic_setup(vport, num_tqps); | |
1320 | } | |
1321 | ||
1322 | return 0; | |
1323 | } | |
1324 | ||
1325 | static int hclge_alloc_vport(struct hclge_dev *hdev) | |
1326 | { | |
1327 | struct pci_dev *pdev = hdev->pdev; | |
1328 | struct hclge_vport *vport; | |
1329 | u32 tqp_main_vport; | |
1330 | u32 tqp_per_vport; | |
1331 | int num_vport, i; | |
1332 | int ret; | |
1333 | ||
1334 | /* We need to alloc a vport for main NIC of PF */ | |
1335 | num_vport = hdev->num_vmdq_vport + hdev->num_req_vfs + 1; | |
1336 | ||
1337 | if (hdev->num_tqps < num_vport) | |
1338 | num_vport = hdev->num_tqps; | |
1339 | ||
1340 | /* Alloc the same number of TQPs for every vport */ | |
1341 | tqp_per_vport = hdev->num_tqps / num_vport; | |
1342 | tqp_main_vport = tqp_per_vport + hdev->num_tqps % num_vport; | |
1343 | ||
1344 | vport = devm_kcalloc(&pdev->dev, num_vport, sizeof(struct hclge_vport), | |
1345 | GFP_KERNEL); | |
1346 | if (!vport) | |
1347 | return -ENOMEM; | |
1348 | ||
1349 | hdev->vport = vport; | |
1350 | hdev->num_alloc_vport = num_vport; | |
1351 | ||
1352 | #ifdef CONFIG_PCI_IOV | |
1353 | /* Enable SRIOV */ | |
1354 | if (hdev->num_req_vfs) { | |
1355 | dev_info(&pdev->dev, "active VFs(%d) found, enabling SRIOV\n", | |
1356 | hdev->num_req_vfs); | |
1357 | ret = pci_enable_sriov(hdev->pdev, hdev->num_req_vfs); | |
1358 | if (ret) { | |
1359 | hdev->num_alloc_vfs = 0; | |
1360 | dev_err(&pdev->dev, "SRIOV enable failed %d\n", | |
1361 | ret); | |
1362 | return ret; | |
1363 | } | |
1364 | } | |
1365 | hdev->num_alloc_vfs = hdev->num_req_vfs; | |
1366 | #endif | |
1367 | ||
1368 | for (i = 0; i < num_vport; i++) { | |
1369 | vport->back = hdev; | |
1370 | vport->vport_id = i; | |
1371 | ||
1372 | if (i == 0) | |
1373 | ret = hclge_vport_setup(vport, tqp_main_vport); | |
1374 | else | |
1375 | ret = hclge_vport_setup(vport, tqp_per_vport); | |
1376 | if (ret) { | |
1377 | dev_err(&pdev->dev, | |
1378 | "vport setup failed for vport %d, %d\n", | |
1379 | i, ret); | |
1380 | return ret; | |
1381 | } | |
1382 | ||
1383 | vport++; | |
1384 | } | |
1385 | ||
1386 | return 0; | |
1387 | } | |
1388 | ||
acf61ecd YL |
1389 | static int hclge_cmd_alloc_tx_buff(struct hclge_dev *hdev, |
1390 | struct hclge_pkt_buf_alloc *buf_alloc) | |
46a3df9f S |
1391 | { |
1392 | /* TX buffer size is unit by 128 byte */ | |
1393 | #define HCLGE_BUF_SIZE_UNIT_SHIFT 7 | |
1394 | #define HCLGE_BUF_SIZE_UPDATE_EN_MSK BIT(15) | |
d44f9b63 | 1395 | struct hclge_tx_buff_alloc_cmd *req; |
46a3df9f S |
1396 | struct hclge_desc desc; |
1397 | int ret; | |
1398 | u8 i; | |
1399 | ||
d44f9b63 | 1400 | req = (struct hclge_tx_buff_alloc_cmd *)desc.data; |
46a3df9f S |
1401 | |
1402 | hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TX_BUFF_ALLOC, 0); | |
9ffe79a9 | 1403 | for (i = 0; i < HCLGE_TC_NUM; i++) { |
acf61ecd | 1404 | u32 buf_size = buf_alloc->priv_buf[i].tx_buf_size; |
9ffe79a9 | 1405 | |
46a3df9f S |
1406 | req->tx_pkt_buff[i] = |
1407 | cpu_to_le16((buf_size >> HCLGE_BUF_SIZE_UNIT_SHIFT) | | |
1408 | HCLGE_BUF_SIZE_UPDATE_EN_MSK); | |
9ffe79a9 | 1409 | } |
46a3df9f S |
1410 | |
1411 | ret = hclge_cmd_send(&hdev->hw, &desc, 1); | |
1412 | if (ret) { | |
1413 | dev_err(&hdev->pdev->dev, "tx buffer alloc cmd failed %d.\n", | |
1414 | ret); | |
1415 | return ret; | |
1416 | } | |
1417 | ||
1418 | return 0; | |
1419 | } | |
1420 | ||
acf61ecd YL |
1421 | static int hclge_tx_buffer_alloc(struct hclge_dev *hdev, |
1422 | struct hclge_pkt_buf_alloc *buf_alloc) | |
46a3df9f | 1423 | { |
acf61ecd | 1424 | int ret = hclge_cmd_alloc_tx_buff(hdev, buf_alloc); |
46a3df9f S |
1425 | |
1426 | if (ret) { | |
1427 | dev_err(&hdev->pdev->dev, | |
1428 | "tx buffer alloc failed %d\n", ret); | |
1429 | return ret; | |
1430 | } | |
1431 | ||
1432 | return 0; | |
1433 | } | |
1434 | ||
1435 | static int hclge_get_tc_num(struct hclge_dev *hdev) | |
1436 | { | |
1437 | int i, cnt = 0; | |
1438 | ||
1439 | for (i = 0; i < HCLGE_MAX_TC_NUM; i++) | |
1440 | if (hdev->hw_tc_map & BIT(i)) | |
1441 | cnt++; | |
1442 | return cnt; | |
1443 | } | |
1444 | ||
1445 | static int hclge_get_pfc_enalbe_num(struct hclge_dev *hdev) | |
1446 | { | |
1447 | int i, cnt = 0; | |
1448 | ||
1449 | for (i = 0; i < HCLGE_MAX_TC_NUM; i++) | |
1450 | if (hdev->hw_tc_map & BIT(i) && | |
1451 | hdev->tm_info.hw_pfc_map & BIT(i)) | |
1452 | cnt++; | |
1453 | return cnt; | |
1454 | } | |
1455 | ||
1456 | /* Get the number of pfc enabled TCs, which have private buffer */ | |
acf61ecd YL |
1457 | static int hclge_get_pfc_priv_num(struct hclge_dev *hdev, |
1458 | struct hclge_pkt_buf_alloc *buf_alloc) | |
46a3df9f S |
1459 | { |
1460 | struct hclge_priv_buf *priv; | |
1461 | int i, cnt = 0; | |
1462 | ||
1463 | for (i = 0; i < HCLGE_MAX_TC_NUM; i++) { | |
acf61ecd | 1464 | priv = &buf_alloc->priv_buf[i]; |
46a3df9f S |
1465 | if ((hdev->tm_info.hw_pfc_map & BIT(i)) && |
1466 | priv->enable) | |
1467 | cnt++; | |
1468 | } | |
1469 | ||
1470 | return cnt; | |
1471 | } | |
1472 | ||
1473 | /* Get the number of pfc disabled TCs, which have private buffer */ | |
acf61ecd YL |
1474 | static int hclge_get_no_pfc_priv_num(struct hclge_dev *hdev, |
1475 | struct hclge_pkt_buf_alloc *buf_alloc) | |
46a3df9f S |
1476 | { |
1477 | struct hclge_priv_buf *priv; | |
1478 | int i, cnt = 0; | |
1479 | ||
1480 | for (i = 0; i < HCLGE_MAX_TC_NUM; i++) { | |
acf61ecd | 1481 | priv = &buf_alloc->priv_buf[i]; |
46a3df9f S |
1482 | if (hdev->hw_tc_map & BIT(i) && |
1483 | !(hdev->tm_info.hw_pfc_map & BIT(i)) && | |
1484 | priv->enable) | |
1485 | cnt++; | |
1486 | } | |
1487 | ||
1488 | return cnt; | |
1489 | } | |
1490 | ||
acf61ecd | 1491 | static u32 hclge_get_rx_priv_buff_alloced(struct hclge_pkt_buf_alloc *buf_alloc) |
46a3df9f S |
1492 | { |
1493 | struct hclge_priv_buf *priv; | |
1494 | u32 rx_priv = 0; | |
1495 | int i; | |
1496 | ||
1497 | for (i = 0; i < HCLGE_MAX_TC_NUM; i++) { | |
acf61ecd | 1498 | priv = &buf_alloc->priv_buf[i]; |
46a3df9f S |
1499 | if (priv->enable) |
1500 | rx_priv += priv->buf_size; | |
1501 | } | |
1502 | return rx_priv; | |
1503 | } | |
1504 | ||
acf61ecd | 1505 | static u32 hclge_get_tx_buff_alloced(struct hclge_pkt_buf_alloc *buf_alloc) |
9ffe79a9 YL |
1506 | { |
1507 | u32 i, total_tx_size = 0; | |
1508 | ||
1509 | for (i = 0; i < HCLGE_MAX_TC_NUM; i++) | |
acf61ecd | 1510 | total_tx_size += buf_alloc->priv_buf[i].tx_buf_size; |
9ffe79a9 YL |
1511 | |
1512 | return total_tx_size; | |
1513 | } | |
1514 | ||
acf61ecd YL |
1515 | static bool hclge_is_rx_buf_ok(struct hclge_dev *hdev, |
1516 | struct hclge_pkt_buf_alloc *buf_alloc, | |
1517 | u32 rx_all) | |
46a3df9f S |
1518 | { |
1519 | u32 shared_buf_min, shared_buf_tc, shared_std; | |
1520 | int tc_num, pfc_enable_num; | |
1521 | u32 shared_buf; | |
1522 | u32 rx_priv; | |
1523 | int i; | |
1524 | ||
1525 | tc_num = hclge_get_tc_num(hdev); | |
1526 | pfc_enable_num = hclge_get_pfc_enalbe_num(hdev); | |
1527 | ||
d221df4e YL |
1528 | if (hnae3_dev_dcb_supported(hdev)) |
1529 | shared_buf_min = 2 * hdev->mps + HCLGE_DEFAULT_DV; | |
1530 | else | |
1531 | shared_buf_min = 2 * hdev->mps + HCLGE_DEFAULT_NON_DCB_DV; | |
1532 | ||
46a3df9f S |
1533 | shared_buf_tc = pfc_enable_num * hdev->mps + |
1534 | (tc_num - pfc_enable_num) * hdev->mps / 2 + | |
1535 | hdev->mps; | |
1536 | shared_std = max_t(u32, shared_buf_min, shared_buf_tc); | |
1537 | ||
acf61ecd | 1538 | rx_priv = hclge_get_rx_priv_buff_alloced(buf_alloc); |
46a3df9f S |
1539 | if (rx_all <= rx_priv + shared_std) |
1540 | return false; | |
1541 | ||
1542 | shared_buf = rx_all - rx_priv; | |
acf61ecd YL |
1543 | buf_alloc->s_buf.buf_size = shared_buf; |
1544 | buf_alloc->s_buf.self.high = shared_buf; | |
1545 | buf_alloc->s_buf.self.low = 2 * hdev->mps; | |
46a3df9f S |
1546 | |
1547 | for (i = 0; i < HCLGE_MAX_TC_NUM; i++) { | |
1548 | if ((hdev->hw_tc_map & BIT(i)) && | |
1549 | (hdev->tm_info.hw_pfc_map & BIT(i))) { | |
acf61ecd YL |
1550 | buf_alloc->s_buf.tc_thrd[i].low = hdev->mps; |
1551 | buf_alloc->s_buf.tc_thrd[i].high = 2 * hdev->mps; | |
46a3df9f | 1552 | } else { |
acf61ecd YL |
1553 | buf_alloc->s_buf.tc_thrd[i].low = 0; |
1554 | buf_alloc->s_buf.tc_thrd[i].high = hdev->mps; | |
46a3df9f S |
1555 | } |
1556 | } | |
1557 | ||
1558 | return true; | |
1559 | } | |
1560 | ||
acf61ecd YL |
1561 | static int hclge_tx_buffer_calc(struct hclge_dev *hdev, |
1562 | struct hclge_pkt_buf_alloc *buf_alloc) | |
9ffe79a9 YL |
1563 | { |
1564 | u32 i, total_size; | |
1565 | ||
1566 | total_size = hdev->pkt_buf_size; | |
1567 | ||
1568 | /* alloc tx buffer for all enabled tc */ | |
1569 | for (i = 0; i < HCLGE_MAX_TC_NUM; i++) { | |
acf61ecd | 1570 | struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i]; |
9ffe79a9 YL |
1571 | |
1572 | if (total_size < HCLGE_DEFAULT_TX_BUF) | |
1573 | return -ENOMEM; | |
1574 | ||
1575 | if (hdev->hw_tc_map & BIT(i)) | |
1576 | priv->tx_buf_size = HCLGE_DEFAULT_TX_BUF; | |
1577 | else | |
1578 | priv->tx_buf_size = 0; | |
1579 | ||
1580 | total_size -= priv->tx_buf_size; | |
1581 | } | |
1582 | ||
1583 | return 0; | |
1584 | } | |
1585 | ||
46a3df9f S |
1586 | /* hclge_rx_buffer_calc: calculate the rx private buffer size for all TCs |
1587 | * @hdev: pointer to struct hclge_dev | |
acf61ecd | 1588 | * @buf_alloc: pointer to buffer calculation data |
46a3df9f S |
1589 | * @return: 0: calculate sucessful, negative: fail |
1590 | */ | |
1db9b1bf YL |
1591 | static int hclge_rx_buffer_calc(struct hclge_dev *hdev, |
1592 | struct hclge_pkt_buf_alloc *buf_alloc) | |
46a3df9f | 1593 | { |
9ffe79a9 | 1594 | u32 rx_all = hdev->pkt_buf_size; |
46a3df9f S |
1595 | int no_pfc_priv_num, pfc_priv_num; |
1596 | struct hclge_priv_buf *priv; | |
1597 | int i; | |
1598 | ||
acf61ecd | 1599 | rx_all -= hclge_get_tx_buff_alloced(buf_alloc); |
9ffe79a9 | 1600 | |
d602a525 YL |
1601 | /* When DCB is not supported, rx private |
1602 | * buffer is not allocated. | |
1603 | */ | |
1604 | if (!hnae3_dev_dcb_supported(hdev)) { | |
acf61ecd | 1605 | if (!hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all)) |
d602a525 YL |
1606 | return -ENOMEM; |
1607 | ||
1608 | return 0; | |
1609 | } | |
1610 | ||
46a3df9f S |
1611 | /* step 1, try to alloc private buffer for all enabled tc */ |
1612 | for (i = 0; i < HCLGE_MAX_TC_NUM; i++) { | |
acf61ecd | 1613 | priv = &buf_alloc->priv_buf[i]; |
46a3df9f S |
1614 | if (hdev->hw_tc_map & BIT(i)) { |
1615 | priv->enable = 1; | |
1616 | if (hdev->tm_info.hw_pfc_map & BIT(i)) { | |
1617 | priv->wl.low = hdev->mps; | |
1618 | priv->wl.high = priv->wl.low + hdev->mps; | |
1619 | priv->buf_size = priv->wl.high + | |
1620 | HCLGE_DEFAULT_DV; | |
1621 | } else { | |
1622 | priv->wl.low = 0; | |
1623 | priv->wl.high = 2 * hdev->mps; | |
1624 | priv->buf_size = priv->wl.high; | |
1625 | } | |
bb1fe9ea YL |
1626 | } else { |
1627 | priv->enable = 0; | |
1628 | priv->wl.low = 0; | |
1629 | priv->wl.high = 0; | |
1630 | priv->buf_size = 0; | |
46a3df9f S |
1631 | } |
1632 | } | |
1633 | ||
acf61ecd | 1634 | if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all)) |
46a3df9f S |
1635 | return 0; |
1636 | ||
1637 | /* step 2, try to decrease the buffer size of | |
1638 | * no pfc TC's private buffer | |
1639 | */ | |
1640 | for (i = 0; i < HCLGE_MAX_TC_NUM; i++) { | |
acf61ecd | 1641 | priv = &buf_alloc->priv_buf[i]; |
46a3df9f | 1642 | |
bb1fe9ea YL |
1643 | priv->enable = 0; |
1644 | priv->wl.low = 0; | |
1645 | priv->wl.high = 0; | |
1646 | priv->buf_size = 0; | |
1647 | ||
1648 | if (!(hdev->hw_tc_map & BIT(i))) | |
1649 | continue; | |
1650 | ||
1651 | priv->enable = 1; | |
46a3df9f S |
1652 | |
1653 | if (hdev->tm_info.hw_pfc_map & BIT(i)) { | |
1654 | priv->wl.low = 128; | |
1655 | priv->wl.high = priv->wl.low + hdev->mps; | |
1656 | priv->buf_size = priv->wl.high + HCLGE_DEFAULT_DV; | |
1657 | } else { | |
1658 | priv->wl.low = 0; | |
1659 | priv->wl.high = hdev->mps; | |
1660 | priv->buf_size = priv->wl.high; | |
1661 | } | |
1662 | } | |
1663 | ||
acf61ecd | 1664 | if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all)) |
46a3df9f S |
1665 | return 0; |
1666 | ||
1667 | /* step 3, try to reduce the number of pfc disabled TCs, | |
1668 | * which have private buffer | |
1669 | */ | |
1670 | /* get the total no pfc enable TC number, which have private buffer */ | |
acf61ecd | 1671 | no_pfc_priv_num = hclge_get_no_pfc_priv_num(hdev, buf_alloc); |
46a3df9f S |
1672 | |
1673 | /* let the last to be cleared first */ | |
1674 | for (i = HCLGE_MAX_TC_NUM - 1; i >= 0; i--) { | |
acf61ecd | 1675 | priv = &buf_alloc->priv_buf[i]; |
46a3df9f S |
1676 | |
1677 | if (hdev->hw_tc_map & BIT(i) && | |
1678 | !(hdev->tm_info.hw_pfc_map & BIT(i))) { | |
1679 | /* Clear the no pfc TC private buffer */ | |
1680 | priv->wl.low = 0; | |
1681 | priv->wl.high = 0; | |
1682 | priv->buf_size = 0; | |
1683 | priv->enable = 0; | |
1684 | no_pfc_priv_num--; | |
1685 | } | |
1686 | ||
acf61ecd | 1687 | if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all) || |
46a3df9f S |
1688 | no_pfc_priv_num == 0) |
1689 | break; | |
1690 | } | |
1691 | ||
acf61ecd | 1692 | if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all)) |
46a3df9f S |
1693 | return 0; |
1694 | ||
1695 | /* step 4, try to reduce the number of pfc enabled TCs | |
1696 | * which have private buffer. | |
1697 | */ | |
acf61ecd | 1698 | pfc_priv_num = hclge_get_pfc_priv_num(hdev, buf_alloc); |
46a3df9f S |
1699 | |
1700 | /* let the last to be cleared first */ | |
1701 | for (i = HCLGE_MAX_TC_NUM - 1; i >= 0; i--) { | |
acf61ecd | 1702 | priv = &buf_alloc->priv_buf[i]; |
46a3df9f S |
1703 | |
1704 | if (hdev->hw_tc_map & BIT(i) && | |
1705 | hdev->tm_info.hw_pfc_map & BIT(i)) { | |
1706 | /* Reduce the number of pfc TC with private buffer */ | |
1707 | priv->wl.low = 0; | |
1708 | priv->enable = 0; | |
1709 | priv->wl.high = 0; | |
1710 | priv->buf_size = 0; | |
1711 | pfc_priv_num--; | |
1712 | } | |
1713 | ||
acf61ecd | 1714 | if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all) || |
46a3df9f S |
1715 | pfc_priv_num == 0) |
1716 | break; | |
1717 | } | |
acf61ecd | 1718 | if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all)) |
46a3df9f S |
1719 | return 0; |
1720 | ||
1721 | return -ENOMEM; | |
1722 | } | |
1723 | ||
acf61ecd YL |
1724 | static int hclge_rx_priv_buf_alloc(struct hclge_dev *hdev, |
1725 | struct hclge_pkt_buf_alloc *buf_alloc) | |
46a3df9f | 1726 | { |
d44f9b63 | 1727 | struct hclge_rx_priv_buff_cmd *req; |
46a3df9f S |
1728 | struct hclge_desc desc; |
1729 | int ret; | |
1730 | int i; | |
1731 | ||
1732 | hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_PRIV_BUFF_ALLOC, false); | |
d44f9b63 | 1733 | req = (struct hclge_rx_priv_buff_cmd *)desc.data; |
46a3df9f S |
1734 | |
1735 | /* Alloc private buffer TCs */ | |
1736 | for (i = 0; i < HCLGE_MAX_TC_NUM; i++) { | |
acf61ecd | 1737 | struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i]; |
46a3df9f S |
1738 | |
1739 | req->buf_num[i] = | |
1740 | cpu_to_le16(priv->buf_size >> HCLGE_BUF_UNIT_S); | |
1741 | req->buf_num[i] |= | |
5bca3b94 | 1742 | cpu_to_le16(1 << HCLGE_TC0_PRI_BUF_EN_B); |
46a3df9f S |
1743 | } |
1744 | ||
b8c8bf47 | 1745 | req->shared_buf = |
acf61ecd | 1746 | cpu_to_le16((buf_alloc->s_buf.buf_size >> HCLGE_BUF_UNIT_S) | |
b8c8bf47 YL |
1747 | (1 << HCLGE_TC0_PRI_BUF_EN_B)); |
1748 | ||
46a3df9f S |
1749 | ret = hclge_cmd_send(&hdev->hw, &desc, 1); |
1750 | if (ret) { | |
1751 | dev_err(&hdev->pdev->dev, | |
1752 | "rx private buffer alloc cmd failed %d\n", ret); | |
1753 | return ret; | |
1754 | } | |
1755 | ||
1756 | return 0; | |
1757 | } | |
1758 | ||
1759 | #define HCLGE_PRIV_ENABLE(a) ((a) > 0 ? 1 : 0) | |
1760 | ||
acf61ecd YL |
1761 | static int hclge_rx_priv_wl_config(struct hclge_dev *hdev, |
1762 | struct hclge_pkt_buf_alloc *buf_alloc) | |
46a3df9f S |
1763 | { |
1764 | struct hclge_rx_priv_wl_buf *req; | |
1765 | struct hclge_priv_buf *priv; | |
1766 | struct hclge_desc desc[2]; | |
1767 | int i, j; | |
1768 | int ret; | |
1769 | ||
1770 | for (i = 0; i < 2; i++) { | |
1771 | hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_RX_PRIV_WL_ALLOC, | |
1772 | false); | |
1773 | req = (struct hclge_rx_priv_wl_buf *)desc[i].data; | |
1774 | ||
1775 | /* The first descriptor set the NEXT bit to 1 */ | |
1776 | if (i == 0) | |
1777 | desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT); | |
1778 | else | |
1779 | desc[i].flag &= ~cpu_to_le16(HCLGE_CMD_FLAG_NEXT); | |
1780 | ||
1781 | for (j = 0; j < HCLGE_TC_NUM_ONE_DESC; j++) { | |
acf61ecd YL |
1782 | u32 idx = i * HCLGE_TC_NUM_ONE_DESC + j; |
1783 | ||
1784 | priv = &buf_alloc->priv_buf[idx]; | |
46a3df9f S |
1785 | req->tc_wl[j].high = |
1786 | cpu_to_le16(priv->wl.high >> HCLGE_BUF_UNIT_S); | |
1787 | req->tc_wl[j].high |= | |
1788 | cpu_to_le16(HCLGE_PRIV_ENABLE(priv->wl.high) << | |
1789 | HCLGE_RX_PRIV_EN_B); | |
1790 | req->tc_wl[j].low = | |
1791 | cpu_to_le16(priv->wl.low >> HCLGE_BUF_UNIT_S); | |
1792 | req->tc_wl[j].low |= | |
1793 | cpu_to_le16(HCLGE_PRIV_ENABLE(priv->wl.low) << | |
1794 | HCLGE_RX_PRIV_EN_B); | |
1795 | } | |
1796 | } | |
1797 | ||
1798 | /* Send 2 descriptor at one time */ | |
1799 | ret = hclge_cmd_send(&hdev->hw, desc, 2); | |
1800 | if (ret) { | |
1801 | dev_err(&hdev->pdev->dev, | |
1802 | "rx private waterline config cmd failed %d\n", | |
1803 | ret); | |
1804 | return ret; | |
1805 | } | |
1806 | return 0; | |
1807 | } | |
1808 | ||
acf61ecd YL |
1809 | static int hclge_common_thrd_config(struct hclge_dev *hdev, |
1810 | struct hclge_pkt_buf_alloc *buf_alloc) | |
46a3df9f | 1811 | { |
acf61ecd | 1812 | struct hclge_shared_buf *s_buf = &buf_alloc->s_buf; |
46a3df9f S |
1813 | struct hclge_rx_com_thrd *req; |
1814 | struct hclge_desc desc[2]; | |
1815 | struct hclge_tc_thrd *tc; | |
1816 | int i, j; | |
1817 | int ret; | |
1818 | ||
1819 | for (i = 0; i < 2; i++) { | |
1820 | hclge_cmd_setup_basic_desc(&desc[i], | |
1821 | HCLGE_OPC_RX_COM_THRD_ALLOC, false); | |
1822 | req = (struct hclge_rx_com_thrd *)&desc[i].data; | |
1823 | ||
1824 | /* The first descriptor set the NEXT bit to 1 */ | |
1825 | if (i == 0) | |
1826 | desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT); | |
1827 | else | |
1828 | desc[i].flag &= ~cpu_to_le16(HCLGE_CMD_FLAG_NEXT); | |
1829 | ||
1830 | for (j = 0; j < HCLGE_TC_NUM_ONE_DESC; j++) { | |
1831 | tc = &s_buf->tc_thrd[i * HCLGE_TC_NUM_ONE_DESC + j]; | |
1832 | ||
1833 | req->com_thrd[j].high = | |
1834 | cpu_to_le16(tc->high >> HCLGE_BUF_UNIT_S); | |
1835 | req->com_thrd[j].high |= | |
1836 | cpu_to_le16(HCLGE_PRIV_ENABLE(tc->high) << | |
1837 | HCLGE_RX_PRIV_EN_B); | |
1838 | req->com_thrd[j].low = | |
1839 | cpu_to_le16(tc->low >> HCLGE_BUF_UNIT_S); | |
1840 | req->com_thrd[j].low |= | |
1841 | cpu_to_le16(HCLGE_PRIV_ENABLE(tc->low) << | |
1842 | HCLGE_RX_PRIV_EN_B); | |
1843 | } | |
1844 | } | |
1845 | ||
1846 | /* Send 2 descriptors at one time */ | |
1847 | ret = hclge_cmd_send(&hdev->hw, desc, 2); | |
1848 | if (ret) { | |
1849 | dev_err(&hdev->pdev->dev, | |
1850 | "common threshold config cmd failed %d\n", ret); | |
1851 | return ret; | |
1852 | } | |
1853 | return 0; | |
1854 | } | |
1855 | ||
acf61ecd YL |
1856 | static int hclge_common_wl_config(struct hclge_dev *hdev, |
1857 | struct hclge_pkt_buf_alloc *buf_alloc) | |
46a3df9f | 1858 | { |
acf61ecd | 1859 | struct hclge_shared_buf *buf = &buf_alloc->s_buf; |
46a3df9f S |
1860 | struct hclge_rx_com_wl *req; |
1861 | struct hclge_desc desc; | |
1862 | int ret; | |
1863 | ||
1864 | hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_COM_WL_ALLOC, false); | |
1865 | ||
1866 | req = (struct hclge_rx_com_wl *)desc.data; | |
1867 | req->com_wl.high = cpu_to_le16(buf->self.high >> HCLGE_BUF_UNIT_S); | |
1868 | req->com_wl.high |= | |
1869 | cpu_to_le16(HCLGE_PRIV_ENABLE(buf->self.high) << | |
1870 | HCLGE_RX_PRIV_EN_B); | |
1871 | ||
1872 | req->com_wl.low = cpu_to_le16(buf->self.low >> HCLGE_BUF_UNIT_S); | |
1873 | req->com_wl.low |= | |
1874 | cpu_to_le16(HCLGE_PRIV_ENABLE(buf->self.low) << | |
1875 | HCLGE_RX_PRIV_EN_B); | |
1876 | ||
1877 | ret = hclge_cmd_send(&hdev->hw, &desc, 1); | |
1878 | if (ret) { | |
1879 | dev_err(&hdev->pdev->dev, | |
1880 | "common waterline config cmd failed %d\n", ret); | |
1881 | return ret; | |
1882 | } | |
1883 | ||
1884 | return 0; | |
1885 | } | |
1886 | ||
1887 | int hclge_buffer_alloc(struct hclge_dev *hdev) | |
1888 | { | |
acf61ecd | 1889 | struct hclge_pkt_buf_alloc *pkt_buf; |
46a3df9f S |
1890 | int ret; |
1891 | ||
acf61ecd YL |
1892 | pkt_buf = kzalloc(sizeof(*pkt_buf), GFP_KERNEL); |
1893 | if (!pkt_buf) | |
46a3df9f S |
1894 | return -ENOMEM; |
1895 | ||
acf61ecd | 1896 | ret = hclge_tx_buffer_calc(hdev, pkt_buf); |
9ffe79a9 YL |
1897 | if (ret) { |
1898 | dev_err(&hdev->pdev->dev, | |
1899 | "could not calc tx buffer size for all TCs %d\n", ret); | |
acf61ecd | 1900 | goto out; |
9ffe79a9 YL |
1901 | } |
1902 | ||
acf61ecd | 1903 | ret = hclge_tx_buffer_alloc(hdev, pkt_buf); |
46a3df9f S |
1904 | if (ret) { |
1905 | dev_err(&hdev->pdev->dev, | |
1906 | "could not alloc tx buffers %d\n", ret); | |
acf61ecd | 1907 | goto out; |
46a3df9f S |
1908 | } |
1909 | ||
acf61ecd | 1910 | ret = hclge_rx_buffer_calc(hdev, pkt_buf); |
46a3df9f S |
1911 | if (ret) { |
1912 | dev_err(&hdev->pdev->dev, | |
1913 | "could not calc rx priv buffer size for all TCs %d\n", | |
1914 | ret); | |
acf61ecd | 1915 | goto out; |
46a3df9f S |
1916 | } |
1917 | ||
acf61ecd | 1918 | ret = hclge_rx_priv_buf_alloc(hdev, pkt_buf); |
46a3df9f S |
1919 | if (ret) { |
1920 | dev_err(&hdev->pdev->dev, "could not alloc rx priv buffer %d\n", | |
1921 | ret); | |
acf61ecd | 1922 | goto out; |
46a3df9f S |
1923 | } |
1924 | ||
2daf4a65 | 1925 | if (hnae3_dev_dcb_supported(hdev)) { |
acf61ecd | 1926 | ret = hclge_rx_priv_wl_config(hdev, pkt_buf); |
2daf4a65 YL |
1927 | if (ret) { |
1928 | dev_err(&hdev->pdev->dev, | |
1929 | "could not configure rx private waterline %d\n", | |
1930 | ret); | |
acf61ecd | 1931 | goto out; |
2daf4a65 | 1932 | } |
46a3df9f | 1933 | |
acf61ecd | 1934 | ret = hclge_common_thrd_config(hdev, pkt_buf); |
2daf4a65 YL |
1935 | if (ret) { |
1936 | dev_err(&hdev->pdev->dev, | |
1937 | "could not configure common threshold %d\n", | |
1938 | ret); | |
acf61ecd | 1939 | goto out; |
2daf4a65 | 1940 | } |
46a3df9f S |
1941 | } |
1942 | ||
acf61ecd YL |
1943 | ret = hclge_common_wl_config(hdev, pkt_buf); |
1944 | if (ret) | |
46a3df9f S |
1945 | dev_err(&hdev->pdev->dev, |
1946 | "could not configure common waterline %d\n", ret); | |
46a3df9f | 1947 | |
acf61ecd YL |
1948 | out: |
1949 | kfree(pkt_buf); | |
1950 | return ret; | |
46a3df9f S |
1951 | } |
1952 | ||
1953 | static int hclge_init_roce_base_info(struct hclge_vport *vport) | |
1954 | { | |
1955 | struct hnae3_handle *roce = &vport->roce; | |
1956 | struct hnae3_handle *nic = &vport->nic; | |
1957 | ||
887c3820 | 1958 | roce->rinfo.num_vectors = vport->back->num_roce_msi; |
46a3df9f S |
1959 | |
1960 | if (vport->back->num_msi_left < vport->roce.rinfo.num_vectors || | |
1961 | vport->back->num_msi_left == 0) | |
1962 | return -EINVAL; | |
1963 | ||
1964 | roce->rinfo.base_vector = vport->back->roce_base_vector; | |
1965 | ||
1966 | roce->rinfo.netdev = nic->kinfo.netdev; | |
1967 | roce->rinfo.roce_io_base = vport->back->hw.io_base; | |
1968 | ||
1969 | roce->pdev = nic->pdev; | |
1970 | roce->ae_algo = nic->ae_algo; | |
1971 | roce->numa_node_mask = nic->numa_node_mask; | |
1972 | ||
1973 | return 0; | |
1974 | } | |
1975 | ||
887c3820 | 1976 | static int hclge_init_msi(struct hclge_dev *hdev) |
46a3df9f S |
1977 | { |
1978 | struct pci_dev *pdev = hdev->pdev; | |
887c3820 SM |
1979 | int vectors; |
1980 | int i; | |
46a3df9f | 1981 | |
887c3820 SM |
1982 | vectors = pci_alloc_irq_vectors(pdev, 1, hdev->num_msi, |
1983 | PCI_IRQ_MSI | PCI_IRQ_MSIX); | |
1984 | if (vectors < 0) { | |
1985 | dev_err(&pdev->dev, | |
1986 | "failed(%d) to allocate MSI/MSI-X vectors\n", | |
1987 | vectors); | |
1988 | return vectors; | |
46a3df9f | 1989 | } |
887c3820 SM |
1990 | if (vectors < hdev->num_msi) |
1991 | dev_warn(&hdev->pdev->dev, | |
1992 | "requested %d MSI/MSI-X, but allocated %d MSI/MSI-X\n", | |
1993 | hdev->num_msi, vectors); | |
46a3df9f | 1994 | |
887c3820 SM |
1995 | hdev->num_msi = vectors; |
1996 | hdev->num_msi_left = vectors; | |
1997 | hdev->base_msi_vector = pdev->irq; | |
46a3df9f S |
1998 | hdev->roce_base_vector = hdev->base_msi_vector + |
1999 | HCLGE_ROCE_VECTOR_OFFSET; | |
2000 | ||
46a3df9f S |
2001 | hdev->vector_status = devm_kcalloc(&pdev->dev, hdev->num_msi, |
2002 | sizeof(u16), GFP_KERNEL); | |
887c3820 SM |
2003 | if (!hdev->vector_status) { |
2004 | pci_free_irq_vectors(pdev); | |
46a3df9f | 2005 | return -ENOMEM; |
887c3820 | 2006 | } |
46a3df9f S |
2007 | |
2008 | for (i = 0; i < hdev->num_msi; i++) | |
2009 | hdev->vector_status[i] = HCLGE_INVALID_VPORT; | |
2010 | ||
887c3820 SM |
2011 | hdev->vector_irq = devm_kcalloc(&pdev->dev, hdev->num_msi, |
2012 | sizeof(int), GFP_KERNEL); | |
2013 | if (!hdev->vector_irq) { | |
2014 | pci_free_irq_vectors(pdev); | |
2015 | return -ENOMEM; | |
46a3df9f | 2016 | } |
46a3df9f S |
2017 | |
2018 | return 0; | |
2019 | } | |
2020 | ||
2021 | static void hclge_check_speed_dup(struct hclge_dev *hdev, int duplex, int speed) | |
2022 | { | |
2023 | struct hclge_mac *mac = &hdev->hw.mac; | |
2024 | ||
2025 | if ((speed == HCLGE_MAC_SPEED_10M) || (speed == HCLGE_MAC_SPEED_100M)) | |
2026 | mac->duplex = (u8)duplex; | |
2027 | else | |
2028 | mac->duplex = HCLGE_MAC_FULL; | |
2029 | ||
2030 | mac->speed = speed; | |
2031 | } | |
2032 | ||
2033 | int hclge_cfg_mac_speed_dup(struct hclge_dev *hdev, int speed, u8 duplex) | |
2034 | { | |
d44f9b63 | 2035 | struct hclge_config_mac_speed_dup_cmd *req; |
46a3df9f S |
2036 | struct hclge_desc desc; |
2037 | int ret; | |
2038 | ||
d44f9b63 | 2039 | req = (struct hclge_config_mac_speed_dup_cmd *)desc.data; |
46a3df9f S |
2040 | |
2041 | hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_SPEED_DUP, false); | |
2042 | ||
2043 | hnae_set_bit(req->speed_dup, HCLGE_CFG_DUPLEX_B, !!duplex); | |
2044 | ||
2045 | switch (speed) { | |
2046 | case HCLGE_MAC_SPEED_10M: | |
2047 | hnae_set_field(req->speed_dup, HCLGE_CFG_SPEED_M, | |
2048 | HCLGE_CFG_SPEED_S, 6); | |
2049 | break; | |
2050 | case HCLGE_MAC_SPEED_100M: | |
2051 | hnae_set_field(req->speed_dup, HCLGE_CFG_SPEED_M, | |
2052 | HCLGE_CFG_SPEED_S, 7); | |
2053 | break; | |
2054 | case HCLGE_MAC_SPEED_1G: | |
2055 | hnae_set_field(req->speed_dup, HCLGE_CFG_SPEED_M, | |
2056 | HCLGE_CFG_SPEED_S, 0); | |
2057 | break; | |
2058 | case HCLGE_MAC_SPEED_10G: | |
2059 | hnae_set_field(req->speed_dup, HCLGE_CFG_SPEED_M, | |
2060 | HCLGE_CFG_SPEED_S, 1); | |
2061 | break; | |
2062 | case HCLGE_MAC_SPEED_25G: | |
2063 | hnae_set_field(req->speed_dup, HCLGE_CFG_SPEED_M, | |
2064 | HCLGE_CFG_SPEED_S, 2); | |
2065 | break; | |
2066 | case HCLGE_MAC_SPEED_40G: | |
2067 | hnae_set_field(req->speed_dup, HCLGE_CFG_SPEED_M, | |
2068 | HCLGE_CFG_SPEED_S, 3); | |
2069 | break; | |
2070 | case HCLGE_MAC_SPEED_50G: | |
2071 | hnae_set_field(req->speed_dup, HCLGE_CFG_SPEED_M, | |
2072 | HCLGE_CFG_SPEED_S, 4); | |
2073 | break; | |
2074 | case HCLGE_MAC_SPEED_100G: | |
2075 | hnae_set_field(req->speed_dup, HCLGE_CFG_SPEED_M, | |
2076 | HCLGE_CFG_SPEED_S, 5); | |
2077 | break; | |
2078 | default: | |
d7629e74 | 2079 | dev_err(&hdev->pdev->dev, "invalid speed (%d)\n", speed); |
46a3df9f S |
2080 | return -EINVAL; |
2081 | } | |
2082 | ||
2083 | hnae_set_bit(req->mac_change_fec_en, HCLGE_CFG_MAC_SPEED_CHANGE_EN_B, | |
2084 | 1); | |
2085 | ||
2086 | ret = hclge_cmd_send(&hdev->hw, &desc, 1); | |
2087 | if (ret) { | |
2088 | dev_err(&hdev->pdev->dev, | |
2089 | "mac speed/duplex config cmd failed %d.\n", ret); | |
2090 | return ret; | |
2091 | } | |
2092 | ||
2093 | hclge_check_speed_dup(hdev, duplex, speed); | |
2094 | ||
2095 | return 0; | |
2096 | } | |
2097 | ||
2098 | static int hclge_cfg_mac_speed_dup_h(struct hnae3_handle *handle, int speed, | |
2099 | u8 duplex) | |
2100 | { | |
2101 | struct hclge_vport *vport = hclge_get_vport(handle); | |
2102 | struct hclge_dev *hdev = vport->back; | |
2103 | ||
2104 | return hclge_cfg_mac_speed_dup(hdev, speed, duplex); | |
2105 | } | |
2106 | ||
2107 | static int hclge_query_mac_an_speed_dup(struct hclge_dev *hdev, int *speed, | |
2108 | u8 *duplex) | |
2109 | { | |
d44f9b63 | 2110 | struct hclge_query_an_speed_dup_cmd *req; |
46a3df9f S |
2111 | struct hclge_desc desc; |
2112 | int speed_tmp; | |
2113 | int ret; | |
2114 | ||
d44f9b63 | 2115 | req = (struct hclge_query_an_speed_dup_cmd *)desc.data; |
46a3df9f S |
2116 | |
2117 | hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_AN_RESULT, true); | |
2118 | ret = hclge_cmd_send(&hdev->hw, &desc, 1); | |
2119 | if (ret) { | |
2120 | dev_err(&hdev->pdev->dev, | |
2121 | "mac speed/autoneg/duplex query cmd failed %d\n", | |
2122 | ret); | |
2123 | return ret; | |
2124 | } | |
2125 | ||
2126 | *duplex = hnae_get_bit(req->an_syn_dup_speed, HCLGE_QUERY_DUPLEX_B); | |
2127 | speed_tmp = hnae_get_field(req->an_syn_dup_speed, HCLGE_QUERY_SPEED_M, | |
2128 | HCLGE_QUERY_SPEED_S); | |
2129 | ||
2130 | ret = hclge_parse_speed(speed_tmp, speed); | |
2131 | if (ret) { | |
2132 | dev_err(&hdev->pdev->dev, | |
2133 | "could not parse speed(=%d), %d\n", speed_tmp, ret); | |
2134 | return -EIO; | |
2135 | } | |
2136 | ||
2137 | return 0; | |
2138 | } | |
2139 | ||
46a3df9f S |
2140 | static int hclge_set_autoneg_en(struct hclge_dev *hdev, bool enable) |
2141 | { | |
d44f9b63 | 2142 | struct hclge_config_auto_neg_cmd *req; |
46a3df9f | 2143 | struct hclge_desc desc; |
a90bb9a5 | 2144 | u32 flag = 0; |
46a3df9f S |
2145 | int ret; |
2146 | ||
2147 | hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_AN_MODE, false); | |
2148 | ||
d44f9b63 | 2149 | req = (struct hclge_config_auto_neg_cmd *)desc.data; |
a90bb9a5 YL |
2150 | hnae_set_bit(flag, HCLGE_MAC_CFG_AN_EN_B, !!enable); |
2151 | req->cfg_an_cmd_flag = cpu_to_le32(flag); | |
46a3df9f S |
2152 | |
2153 | ret = hclge_cmd_send(&hdev->hw, &desc, 1); | |
2154 | if (ret) { | |
2155 | dev_err(&hdev->pdev->dev, "auto neg set cmd failed %d.\n", | |
2156 | ret); | |
2157 | return ret; | |
2158 | } | |
2159 | ||
2160 | return 0; | |
2161 | } | |
2162 | ||
2163 | static int hclge_set_autoneg(struct hnae3_handle *handle, bool enable) | |
2164 | { | |
2165 | struct hclge_vport *vport = hclge_get_vport(handle); | |
2166 | struct hclge_dev *hdev = vport->back; | |
2167 | ||
2168 | return hclge_set_autoneg_en(hdev, enable); | |
2169 | } | |
2170 | ||
2171 | static int hclge_get_autoneg(struct hnae3_handle *handle) | |
2172 | { | |
2173 | struct hclge_vport *vport = hclge_get_vport(handle); | |
2174 | struct hclge_dev *hdev = vport->back; | |
46a3df9f S |
2175 | |
2176 | return hdev->hw.mac.autoneg; | |
2177 | } | |
2178 | ||
6f712727 PL |
2179 | static int hclge_set_default_mac_vlan_mask(struct hclge_dev *hdev, |
2180 | bool mask_vlan, | |
2181 | u8 *mac_mask) | |
2182 | { | |
2183 | struct hclge_mac_vlan_mask_entry_cmd *req; | |
2184 | struct hclge_desc desc; | |
2185 | int status; | |
2186 | ||
2187 | req = (struct hclge_mac_vlan_mask_entry_cmd *)desc.data; | |
2188 | hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_MASK_SET, false); | |
2189 | ||
2190 | hnae_set_bit(req->vlan_mask, HCLGE_VLAN_MASK_EN_B, | |
2191 | mask_vlan ? 1 : 0); | |
2192 | ether_addr_copy(req->mac_mask, mac_mask); | |
2193 | ||
2194 | status = hclge_cmd_send(&hdev->hw, &desc, 1); | |
2195 | if (status) | |
2196 | dev_err(&hdev->pdev->dev, | |
2197 | "Config mac_vlan_mask failed for cmd_send, ret =%d\n", | |
2198 | status); | |
2199 | ||
2200 | return status; | |
2201 | } | |
2202 | ||
46a3df9f S |
2203 | static int hclge_mac_init(struct hclge_dev *hdev) |
2204 | { | |
2205 | struct hclge_mac *mac = &hdev->hw.mac; | |
6f712727 | 2206 | u8 mac_mask[ETH_ALEN] = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00}; |
46a3df9f S |
2207 | int ret; |
2208 | ||
2209 | ret = hclge_cfg_mac_speed_dup(hdev, hdev->hw.mac.speed, HCLGE_MAC_FULL); | |
2210 | if (ret) { | |
2211 | dev_err(&hdev->pdev->dev, | |
2212 | "Config mac speed dup fail ret=%d\n", ret); | |
2213 | return ret; | |
2214 | } | |
2215 | ||
2216 | mac->link = 0; | |
2217 | ||
46a3df9f S |
2218 | /* Initialize the MTA table work mode */ |
2219 | hdev->accept_mta_mc = true; | |
2220 | hdev->enable_mta = true; | |
2221 | hdev->mta_mac_sel_type = HCLGE_MAC_ADDR_47_36; | |
2222 | ||
2223 | ret = hclge_set_mta_filter_mode(hdev, | |
2224 | hdev->mta_mac_sel_type, | |
2225 | hdev->enable_mta); | |
2226 | if (ret) { | |
2227 | dev_err(&hdev->pdev->dev, "set mta filter mode failed %d\n", | |
2228 | ret); | |
2229 | return ret; | |
2230 | } | |
2231 | ||
6f712727 PL |
2232 | ret = hclge_cfg_func_mta_filter(hdev, 0, hdev->accept_mta_mc); |
2233 | if (ret) { | |
2234 | dev_err(&hdev->pdev->dev, | |
2235 | "set mta filter mode fail ret=%d\n", ret); | |
2236 | return ret; | |
2237 | } | |
2238 | ||
2239 | ret = hclge_set_default_mac_vlan_mask(hdev, true, mac_mask); | |
2240 | if (ret) | |
2241 | dev_err(&hdev->pdev->dev, | |
2242 | "set default mac_vlan_mask fail ret=%d\n", ret); | |
2243 | ||
2244 | return ret; | |
46a3df9f S |
2245 | } |
2246 | ||
22fd3468 SM |
2247 | static void hclge_mbx_task_schedule(struct hclge_dev *hdev) |
2248 | { | |
2249 | if (!test_and_set_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state)) | |
2250 | schedule_work(&hdev->mbx_service_task); | |
2251 | } | |
2252 | ||
ed4a1bb8 SM |
2253 | static void hclge_reset_task_schedule(struct hclge_dev *hdev) |
2254 | { | |
2255 | if (!test_and_set_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state)) | |
2256 | schedule_work(&hdev->rst_service_task); | |
2257 | } | |
2258 | ||
46a3df9f S |
2259 | static void hclge_task_schedule(struct hclge_dev *hdev) |
2260 | { | |
2261 | if (!test_bit(HCLGE_STATE_DOWN, &hdev->state) && | |
2262 | !test_bit(HCLGE_STATE_REMOVING, &hdev->state) && | |
2263 | !test_and_set_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state)) | |
2264 | (void)schedule_work(&hdev->service_task); | |
2265 | } | |
2266 | ||
2267 | static int hclge_get_mac_link_status(struct hclge_dev *hdev) | |
2268 | { | |
d44f9b63 | 2269 | struct hclge_link_status_cmd *req; |
46a3df9f S |
2270 | struct hclge_desc desc; |
2271 | int link_status; | |
2272 | int ret; | |
2273 | ||
2274 | hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_LINK_STATUS, true); | |
2275 | ret = hclge_cmd_send(&hdev->hw, &desc, 1); | |
2276 | if (ret) { | |
2277 | dev_err(&hdev->pdev->dev, "get link status cmd failed %d\n", | |
2278 | ret); | |
2279 | return ret; | |
2280 | } | |
2281 | ||
d44f9b63 | 2282 | req = (struct hclge_link_status_cmd *)desc.data; |
46a3df9f S |
2283 | link_status = req->status & HCLGE_LINK_STATUS; |
2284 | ||
2285 | return !!link_status; | |
2286 | } | |
2287 | ||
2288 | static int hclge_get_mac_phy_link(struct hclge_dev *hdev) | |
2289 | { | |
2290 | int mac_state; | |
2291 | int link_stat; | |
2292 | ||
2293 | mac_state = hclge_get_mac_link_status(hdev); | |
2294 | ||
2295 | if (hdev->hw.mac.phydev) { | |
2296 | if (!genphy_read_status(hdev->hw.mac.phydev)) | |
2297 | link_stat = mac_state & | |
2298 | hdev->hw.mac.phydev->link; | |
2299 | else | |
2300 | link_stat = 0; | |
2301 | ||
2302 | } else { | |
2303 | link_stat = mac_state; | |
2304 | } | |
2305 | ||
2306 | return !!link_stat; | |
2307 | } | |
2308 | ||
2309 | static void hclge_update_link_status(struct hclge_dev *hdev) | |
2310 | { | |
2311 | struct hnae3_client *client = hdev->nic_client; | |
2312 | struct hnae3_handle *handle; | |
2313 | int state; | |
2314 | int i; | |
2315 | ||
2316 | if (!client) | |
2317 | return; | |
2318 | state = hclge_get_mac_phy_link(hdev); | |
2319 | if (state != hdev->hw.mac.link) { | |
2320 | for (i = 0; i < hdev->num_vmdq_vport + 1; i++) { | |
2321 | handle = &hdev->vport[i].nic; | |
2322 | client->ops->link_status_change(handle, state); | |
2323 | } | |
2324 | hdev->hw.mac.link = state; | |
2325 | } | |
2326 | } | |
2327 | ||
2328 | static int hclge_update_speed_duplex(struct hclge_dev *hdev) | |
2329 | { | |
2330 | struct hclge_mac mac = hdev->hw.mac; | |
2331 | u8 duplex; | |
2332 | int speed; | |
2333 | int ret; | |
2334 | ||
2335 | /* get the speed and duplex as autoneg'result from mac cmd when phy | |
2336 | * doesn't exit. | |
2337 | */ | |
c040366b | 2338 | if (mac.phydev || !mac.autoneg) |
46a3df9f S |
2339 | return 0; |
2340 | ||
2341 | ret = hclge_query_mac_an_speed_dup(hdev, &speed, &duplex); | |
2342 | if (ret) { | |
2343 | dev_err(&hdev->pdev->dev, | |
2344 | "mac autoneg/speed/duplex query failed %d\n", ret); | |
2345 | return ret; | |
2346 | } | |
2347 | ||
2348 | if ((mac.speed != speed) || (mac.duplex != duplex)) { | |
2349 | ret = hclge_cfg_mac_speed_dup(hdev, speed, duplex); | |
2350 | if (ret) { | |
2351 | dev_err(&hdev->pdev->dev, | |
2352 | "mac speed/duplex config failed %d\n", ret); | |
2353 | return ret; | |
2354 | } | |
2355 | } | |
2356 | ||
2357 | return 0; | |
2358 | } | |
2359 | ||
2360 | static int hclge_update_speed_duplex_h(struct hnae3_handle *handle) | |
2361 | { | |
2362 | struct hclge_vport *vport = hclge_get_vport(handle); | |
2363 | struct hclge_dev *hdev = vport->back; | |
2364 | ||
2365 | return hclge_update_speed_duplex(hdev); | |
2366 | } | |
2367 | ||
2368 | static int hclge_get_status(struct hnae3_handle *handle) | |
2369 | { | |
2370 | struct hclge_vport *vport = hclge_get_vport(handle); | |
2371 | struct hclge_dev *hdev = vport->back; | |
2372 | ||
2373 | hclge_update_link_status(hdev); | |
2374 | ||
2375 | return hdev->hw.mac.link; | |
2376 | } | |
2377 | ||
d039ef68 | 2378 | static void hclge_service_timer(struct timer_list *t) |
46a3df9f | 2379 | { |
d039ef68 | 2380 | struct hclge_dev *hdev = from_timer(hdev, t, service_timer); |
46a3df9f | 2381 | |
d039ef68 | 2382 | mod_timer(&hdev->service_timer, jiffies + HZ); |
46a3df9f S |
2383 | hclge_task_schedule(hdev); |
2384 | } | |
2385 | ||
2386 | static void hclge_service_complete(struct hclge_dev *hdev) | |
2387 | { | |
2388 | WARN_ON(!test_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state)); | |
2389 | ||
2390 | /* Flush memory before next watchdog */ | |
2391 | smp_mb__before_atomic(); | |
2392 | clear_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state); | |
2393 | } | |
2394 | ||
202f2014 SM |
2395 | static u32 hclge_check_event_cause(struct hclge_dev *hdev, u32 *clearval) |
2396 | { | |
2397 | u32 rst_src_reg; | |
22fd3468 | 2398 | u32 cmdq_src_reg; |
202f2014 SM |
2399 | |
2400 | /* fetch the events from their corresponding regs */ | |
2401 | rst_src_reg = hclge_read_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG); | |
22fd3468 SM |
2402 | cmdq_src_reg = hclge_read_dev(&hdev->hw, HCLGE_VECTOR0_CMDQ_SRC_REG); |
2403 | ||
2404 | /* Assumption: If by any chance reset and mailbox events are reported | |
2405 | * together then we will only process reset event in this go and will | |
2406 | * defer the processing of the mailbox events. Since, we would have not | |
2407 | * cleared RX CMDQ event this time we would receive again another | |
2408 | * interrupt from H/W just for the mailbox. | |
2409 | */ | |
202f2014 SM |
2410 | |
2411 | /* check for vector0 reset event sources */ | |
2412 | if (BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B) & rst_src_reg) { | |
2413 | set_bit(HNAE3_GLOBAL_RESET, &hdev->reset_pending); | |
2414 | *clearval = BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B); | |
2415 | return HCLGE_VECTOR0_EVENT_RST; | |
2416 | } | |
2417 | ||
2418 | if (BIT(HCLGE_VECTOR0_CORERESET_INT_B) & rst_src_reg) { | |
2419 | set_bit(HNAE3_CORE_RESET, &hdev->reset_pending); | |
2420 | *clearval = BIT(HCLGE_VECTOR0_CORERESET_INT_B); | |
2421 | return HCLGE_VECTOR0_EVENT_RST; | |
2422 | } | |
2423 | ||
2424 | if (BIT(HCLGE_VECTOR0_IMPRESET_INT_B) & rst_src_reg) { | |
2425 | set_bit(HNAE3_IMP_RESET, &hdev->reset_pending); | |
2426 | *clearval = BIT(HCLGE_VECTOR0_IMPRESET_INT_B); | |
2427 | return HCLGE_VECTOR0_EVENT_RST; | |
2428 | } | |
2429 | ||
22fd3468 SM |
2430 | /* check for vector0 mailbox(=CMDQ RX) event source */ |
2431 | if (BIT(HCLGE_VECTOR0_RX_CMDQ_INT_B) & cmdq_src_reg) { | |
2432 | cmdq_src_reg &= ~BIT(HCLGE_VECTOR0_RX_CMDQ_INT_B); | |
2433 | *clearval = cmdq_src_reg; | |
2434 | return HCLGE_VECTOR0_EVENT_MBX; | |
2435 | } | |
202f2014 SM |
2436 | |
2437 | return HCLGE_VECTOR0_EVENT_OTHER; | |
2438 | } | |
2439 | ||
2440 | static void hclge_clear_event_cause(struct hclge_dev *hdev, u32 event_type, | |
2441 | u32 regclr) | |
2442 | { | |
22fd3468 SM |
2443 | switch (event_type) { |
2444 | case HCLGE_VECTOR0_EVENT_RST: | |
202f2014 | 2445 | hclge_write_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG, regclr); |
22fd3468 SM |
2446 | break; |
2447 | case HCLGE_VECTOR0_EVENT_MBX: | |
2448 | hclge_write_dev(&hdev->hw, HCLGE_VECTOR0_CMDQ_SRC_REG, regclr); | |
2449 | break; | |
2450 | } | |
202f2014 SM |
2451 | } |
2452 | ||
466b0c00 L |
2453 | static void hclge_enable_vector(struct hclge_misc_vector *vector, bool enable) |
2454 | { | |
2455 | writel(enable ? 1 : 0, vector->addr); | |
2456 | } | |
2457 | ||
2458 | static irqreturn_t hclge_misc_irq_handle(int irq, void *data) | |
2459 | { | |
2460 | struct hclge_dev *hdev = data; | |
202f2014 SM |
2461 | u32 event_cause; |
2462 | u32 clearval; | |
466b0c00 L |
2463 | |
2464 | hclge_enable_vector(&hdev->misc_vector, false); | |
202f2014 SM |
2465 | event_cause = hclge_check_event_cause(hdev, &clearval); |
2466 | ||
22fd3468 | 2467 | /* vector 0 interrupt is shared with reset and mailbox source events.*/ |
202f2014 SM |
2468 | switch (event_cause) { |
2469 | case HCLGE_VECTOR0_EVENT_RST: | |
ed4a1bb8 | 2470 | hclge_reset_task_schedule(hdev); |
202f2014 | 2471 | break; |
22fd3468 SM |
2472 | case HCLGE_VECTOR0_EVENT_MBX: |
2473 | /* If we are here then, | |
2474 | * 1. Either we are not handling any mbx task and we are not | |
2475 | * scheduled as well | |
2476 | * OR | |
2477 | * 2. We could be handling a mbx task but nothing more is | |
2478 | * scheduled. | |
2479 | * In both cases, we should schedule mbx task as there are more | |
2480 | * mbx messages reported by this interrupt. | |
2481 | */ | |
2482 | hclge_mbx_task_schedule(hdev); | |
2483 | ||
202f2014 SM |
2484 | default: |
2485 | dev_dbg(&hdev->pdev->dev, | |
2486 | "received unknown or unhandled event of vector0\n"); | |
2487 | break; | |
2488 | } | |
2489 | ||
2490 | /* we should clear the source of interrupt */ | |
2491 | hclge_clear_event_cause(hdev, event_cause, clearval); | |
2492 | hclge_enable_vector(&hdev->misc_vector, true); | |
466b0c00 L |
2493 | |
2494 | return IRQ_HANDLED; | |
2495 | } | |
2496 | ||
2497 | static void hclge_free_vector(struct hclge_dev *hdev, int vector_id) | |
2498 | { | |
2499 | hdev->vector_status[vector_id] = HCLGE_INVALID_VPORT; | |
2500 | hdev->num_msi_left += 1; | |
2501 | hdev->num_msi_used -= 1; | |
2502 | } | |
2503 | ||
2504 | static void hclge_get_misc_vector(struct hclge_dev *hdev) | |
2505 | { | |
2506 | struct hclge_misc_vector *vector = &hdev->misc_vector; | |
2507 | ||
2508 | vector->vector_irq = pci_irq_vector(hdev->pdev, 0); | |
2509 | ||
2510 | vector->addr = hdev->hw.io_base + HCLGE_MISC_VECTOR_REG_BASE; | |
2511 | hdev->vector_status[0] = 0; | |
2512 | ||
2513 | hdev->num_msi_left -= 1; | |
2514 | hdev->num_msi_used += 1; | |
2515 | } | |
2516 | ||
2517 | static int hclge_misc_irq_init(struct hclge_dev *hdev) | |
2518 | { | |
2519 | int ret; | |
2520 | ||
2521 | hclge_get_misc_vector(hdev); | |
2522 | ||
202f2014 SM |
2523 | /* this would be explicitly freed in the end */ |
2524 | ret = request_irq(hdev->misc_vector.vector_irq, hclge_misc_irq_handle, | |
2525 | 0, "hclge_misc", hdev); | |
466b0c00 L |
2526 | if (ret) { |
2527 | hclge_free_vector(hdev, 0); | |
2528 | dev_err(&hdev->pdev->dev, "request misc irq(%d) fail\n", | |
2529 | hdev->misc_vector.vector_irq); | |
2530 | } | |
2531 | ||
2532 | return ret; | |
2533 | } | |
2534 | ||
202f2014 SM |
2535 | static void hclge_misc_irq_uninit(struct hclge_dev *hdev) |
2536 | { | |
2537 | free_irq(hdev->misc_vector.vector_irq, hdev); | |
2538 | hclge_free_vector(hdev, 0); | |
2539 | } | |
2540 | ||
4ed340ab L |
2541 | static int hclge_notify_client(struct hclge_dev *hdev, |
2542 | enum hnae3_reset_notify_type type) | |
2543 | { | |
2544 | struct hnae3_client *client = hdev->nic_client; | |
2545 | u16 i; | |
2546 | ||
2547 | if (!client->ops->reset_notify) | |
2548 | return -EOPNOTSUPP; | |
2549 | ||
2550 | for (i = 0; i < hdev->num_vmdq_vport + 1; i++) { | |
2551 | struct hnae3_handle *handle = &hdev->vport[i].nic; | |
2552 | int ret; | |
2553 | ||
2554 | ret = client->ops->reset_notify(handle, type); | |
2555 | if (ret) | |
2556 | return ret; | |
2557 | } | |
2558 | ||
2559 | return 0; | |
2560 | } | |
2561 | ||
2562 | static int hclge_reset_wait(struct hclge_dev *hdev) | |
2563 | { | |
2564 | #define HCLGE_RESET_WATI_MS 100 | |
2565 | #define HCLGE_RESET_WAIT_CNT 5 | |
2566 | u32 val, reg, reg_bit; | |
2567 | u32 cnt = 0; | |
2568 | ||
2569 | switch (hdev->reset_type) { | |
2570 | case HNAE3_GLOBAL_RESET: | |
2571 | reg = HCLGE_GLOBAL_RESET_REG; | |
2572 | reg_bit = HCLGE_GLOBAL_RESET_BIT; | |
2573 | break; | |
2574 | case HNAE3_CORE_RESET: | |
2575 | reg = HCLGE_GLOBAL_RESET_REG; | |
2576 | reg_bit = HCLGE_CORE_RESET_BIT; | |
2577 | break; | |
2578 | case HNAE3_FUNC_RESET: | |
2579 | reg = HCLGE_FUN_RST_ING; | |
2580 | reg_bit = HCLGE_FUN_RST_ING_B; | |
2581 | break; | |
2582 | default: | |
2583 | dev_err(&hdev->pdev->dev, | |
2584 | "Wait for unsupported reset type: %d\n", | |
2585 | hdev->reset_type); | |
2586 | return -EINVAL; | |
2587 | } | |
2588 | ||
2589 | val = hclge_read_dev(&hdev->hw, reg); | |
2590 | while (hnae_get_bit(val, reg_bit) && cnt < HCLGE_RESET_WAIT_CNT) { | |
2591 | msleep(HCLGE_RESET_WATI_MS); | |
2592 | val = hclge_read_dev(&hdev->hw, reg); | |
2593 | cnt++; | |
2594 | } | |
2595 | ||
4ed340ab L |
2596 | if (cnt >= HCLGE_RESET_WAIT_CNT) { |
2597 | dev_warn(&hdev->pdev->dev, | |
2598 | "Wait for reset timeout: %d\n", hdev->reset_type); | |
2599 | return -EBUSY; | |
2600 | } | |
2601 | ||
2602 | return 0; | |
2603 | } | |
2604 | ||
2605 | static int hclge_func_reset_cmd(struct hclge_dev *hdev, int func_id) | |
2606 | { | |
2607 | struct hclge_desc desc; | |
2608 | struct hclge_reset_cmd *req = (struct hclge_reset_cmd *)desc.data; | |
2609 | int ret; | |
2610 | ||
2611 | hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_RST_TRIGGER, false); | |
2612 | hnae_set_bit(req->mac_func_reset, HCLGE_CFG_RESET_MAC_B, 0); | |
2613 | hnae_set_bit(req->mac_func_reset, HCLGE_CFG_RESET_FUNC_B, 1); | |
2614 | req->fun_reset_vfid = func_id; | |
2615 | ||
2616 | ret = hclge_cmd_send(&hdev->hw, &desc, 1); | |
2617 | if (ret) | |
2618 | dev_err(&hdev->pdev->dev, | |
2619 | "send function reset cmd fail, status =%d\n", ret); | |
2620 | ||
2621 | return ret; | |
2622 | } | |
2623 | ||
d5752031 | 2624 | static void hclge_do_reset(struct hclge_dev *hdev) |
4ed340ab L |
2625 | { |
2626 | struct pci_dev *pdev = hdev->pdev; | |
2627 | u32 val; | |
2628 | ||
d5752031 | 2629 | switch (hdev->reset_type) { |
4ed340ab L |
2630 | case HNAE3_GLOBAL_RESET: |
2631 | val = hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG); | |
2632 | hnae_set_bit(val, HCLGE_GLOBAL_RESET_BIT, 1); | |
2633 | hclge_write_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG, val); | |
2634 | dev_info(&pdev->dev, "Global Reset requested\n"); | |
2635 | break; | |
2636 | case HNAE3_CORE_RESET: | |
2637 | val = hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG); | |
2638 | hnae_set_bit(val, HCLGE_CORE_RESET_BIT, 1); | |
2639 | hclge_write_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG, val); | |
2640 | dev_info(&pdev->dev, "Core Reset requested\n"); | |
2641 | break; | |
2642 | case HNAE3_FUNC_RESET: | |
2643 | dev_info(&pdev->dev, "PF Reset requested\n"); | |
2644 | hclge_func_reset_cmd(hdev, 0); | |
ed4a1bb8 SM |
2645 | /* schedule again to check later */ |
2646 | set_bit(HNAE3_FUNC_RESET, &hdev->reset_pending); | |
2647 | hclge_reset_task_schedule(hdev); | |
4ed340ab L |
2648 | break; |
2649 | default: | |
2650 | dev_warn(&pdev->dev, | |
d5752031 | 2651 | "Unsupported reset type: %d\n", hdev->reset_type); |
4ed340ab L |
2652 | break; |
2653 | } | |
2654 | } | |
2655 | ||
d5752031 SM |
2656 | static enum hnae3_reset_type hclge_get_reset_level(struct hclge_dev *hdev, |
2657 | unsigned long *addr) | |
2658 | { | |
2659 | enum hnae3_reset_type rst_level = HNAE3_NONE_RESET; | |
2660 | ||
2661 | /* return the highest priority reset level amongst all */ | |
2662 | if (test_bit(HNAE3_GLOBAL_RESET, addr)) | |
2663 | rst_level = HNAE3_GLOBAL_RESET; | |
2664 | else if (test_bit(HNAE3_CORE_RESET, addr)) | |
2665 | rst_level = HNAE3_CORE_RESET; | |
2666 | else if (test_bit(HNAE3_IMP_RESET, addr)) | |
2667 | rst_level = HNAE3_IMP_RESET; | |
2668 | else if (test_bit(HNAE3_FUNC_RESET, addr)) | |
2669 | rst_level = HNAE3_FUNC_RESET; | |
2670 | ||
2671 | /* now, clear all other resets */ | |
2672 | clear_bit(HNAE3_GLOBAL_RESET, addr); | |
2673 | clear_bit(HNAE3_CORE_RESET, addr); | |
2674 | clear_bit(HNAE3_IMP_RESET, addr); | |
2675 | clear_bit(HNAE3_FUNC_RESET, addr); | |
2676 | ||
2677 | return rst_level; | |
2678 | } | |
2679 | ||
2680 | static void hclge_reset(struct hclge_dev *hdev) | |
2681 | { | |
2682 | /* perform reset of the stack & ae device for a client */ | |
2683 | ||
2684 | hclge_notify_client(hdev, HNAE3_DOWN_CLIENT); | |
2685 | ||
2686 | if (!hclge_reset_wait(hdev)) { | |
2687 | rtnl_lock(); | |
2688 | hclge_notify_client(hdev, HNAE3_UNINIT_CLIENT); | |
2689 | hclge_reset_ae_dev(hdev->ae_dev); | |
2690 | hclge_notify_client(hdev, HNAE3_INIT_CLIENT); | |
2691 | rtnl_unlock(); | |
2692 | } else { | |
2693 | /* schedule again to check pending resets later */ | |
2694 | set_bit(hdev->reset_type, &hdev->reset_pending); | |
2695 | hclge_reset_task_schedule(hdev); | |
2696 | } | |
2697 | ||
2698 | hclge_notify_client(hdev, HNAE3_UP_CLIENT); | |
2699 | } | |
2700 | ||
4ed340ab L |
2701 | static void hclge_reset_event(struct hnae3_handle *handle, |
2702 | enum hnae3_reset_type reset) | |
2703 | { | |
2704 | struct hclge_vport *vport = hclge_get_vport(handle); | |
2705 | struct hclge_dev *hdev = vport->back; | |
2706 | ||
2707 | dev_info(&hdev->pdev->dev, | |
2708 | "Receive reset event , reset_type is %d", reset); | |
2709 | ||
2710 | switch (reset) { | |
2711 | case HNAE3_FUNC_RESET: | |
2712 | case HNAE3_CORE_RESET: | |
2713 | case HNAE3_GLOBAL_RESET: | |
ed4a1bb8 SM |
2714 | /* request reset & schedule reset task */ |
2715 | set_bit(reset, &hdev->reset_request); | |
2716 | hclge_reset_task_schedule(hdev); | |
4ed340ab L |
2717 | break; |
2718 | default: | |
2719 | dev_warn(&hdev->pdev->dev, "Unsupported reset event:%d", reset); | |
2720 | break; | |
2721 | } | |
2722 | } | |
2723 | ||
2724 | static void hclge_reset_subtask(struct hclge_dev *hdev) | |
2725 | { | |
d5752031 SM |
2726 | /* check if there is any ongoing reset in the hardware. This status can |
2727 | * be checked from reset_pending. If there is then, we need to wait for | |
2728 | * hardware to complete reset. | |
2729 | * a. If we are able to figure out in reasonable time that hardware | |
2730 | * has fully resetted then, we can proceed with driver, client | |
2731 | * reset. | |
2732 | * b. else, we can come back later to check this status so re-sched | |
2733 | * now. | |
2734 | */ | |
2735 | hdev->reset_type = hclge_get_reset_level(hdev, &hdev->reset_pending); | |
2736 | if (hdev->reset_type != HNAE3_NONE_RESET) | |
2737 | hclge_reset(hdev); | |
4ed340ab | 2738 | |
d5752031 SM |
2739 | /* check if we got any *new* reset requests to be honored */ |
2740 | hdev->reset_type = hclge_get_reset_level(hdev, &hdev->reset_request); | |
2741 | if (hdev->reset_type != HNAE3_NONE_RESET) | |
2742 | hclge_do_reset(hdev); | |
4ed340ab | 2743 | |
4ed340ab L |
2744 | hdev->reset_type = HNAE3_NONE_RESET; |
2745 | } | |
2746 | ||
ed4a1bb8 | 2747 | static void hclge_reset_service_task(struct work_struct *work) |
466b0c00 | 2748 | { |
ed4a1bb8 SM |
2749 | struct hclge_dev *hdev = |
2750 | container_of(work, struct hclge_dev, rst_service_task); | |
2751 | ||
2752 | if (test_and_set_bit(HCLGE_STATE_RST_HANDLING, &hdev->state)) | |
2753 | return; | |
2754 | ||
2755 | clear_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state); | |
2756 | ||
4ed340ab | 2757 | hclge_reset_subtask(hdev); |
ed4a1bb8 SM |
2758 | |
2759 | clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state); | |
466b0c00 L |
2760 | } |
2761 | ||
22fd3468 SM |
2762 | static void hclge_mailbox_service_task(struct work_struct *work) |
2763 | { | |
2764 | struct hclge_dev *hdev = | |
2765 | container_of(work, struct hclge_dev, mbx_service_task); | |
2766 | ||
2767 | if (test_and_set_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state)) | |
2768 | return; | |
2769 | ||
2770 | clear_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state); | |
2771 | ||
2772 | hclge_mbx_handler(hdev); | |
2773 | ||
2774 | clear_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state); | |
2775 | } | |
2776 | ||
46a3df9f S |
2777 | static void hclge_service_task(struct work_struct *work) |
2778 | { | |
2779 | struct hclge_dev *hdev = | |
2780 | container_of(work, struct hclge_dev, service_task); | |
2781 | ||
2782 | hclge_update_speed_duplex(hdev); | |
2783 | hclge_update_link_status(hdev); | |
2784 | hclge_update_stats_for_all(hdev); | |
2785 | hclge_service_complete(hdev); | |
2786 | } | |
2787 | ||
2788 | static void hclge_disable_sriov(struct hclge_dev *hdev) | |
2789 | { | |
2a32ca13 AB |
2790 | /* If our VFs are assigned we cannot shut down SR-IOV |
2791 | * without causing issues, so just leave the hardware | |
2792 | * available but disabled | |
2793 | */ | |
2794 | if (pci_vfs_assigned(hdev->pdev)) { | |
2795 | dev_warn(&hdev->pdev->dev, | |
2796 | "disabling driver while VFs are assigned\n"); | |
2797 | return; | |
2798 | } | |
46a3df9f | 2799 | |
2a32ca13 | 2800 | pci_disable_sriov(hdev->pdev); |
46a3df9f S |
2801 | } |
2802 | ||
2803 | struct hclge_vport *hclge_get_vport(struct hnae3_handle *handle) | |
2804 | { | |
2805 | /* VF handle has no client */ | |
2806 | if (!handle->client) | |
2807 | return container_of(handle, struct hclge_vport, nic); | |
2808 | else if (handle->client->type == HNAE3_CLIENT_ROCE) | |
2809 | return container_of(handle, struct hclge_vport, roce); | |
2810 | else | |
2811 | return container_of(handle, struct hclge_vport, nic); | |
2812 | } | |
2813 | ||
2814 | static int hclge_get_vector(struct hnae3_handle *handle, u16 vector_num, | |
2815 | struct hnae3_vector_info *vector_info) | |
2816 | { | |
2817 | struct hclge_vport *vport = hclge_get_vport(handle); | |
2818 | struct hnae3_vector_info *vector = vector_info; | |
2819 | struct hclge_dev *hdev = vport->back; | |
2820 | int alloc = 0; | |
2821 | int i, j; | |
2822 | ||
2823 | vector_num = min(hdev->num_msi_left, vector_num); | |
2824 | ||
2825 | for (j = 0; j < vector_num; j++) { | |
2826 | for (i = 1; i < hdev->num_msi; i++) { | |
2827 | if (hdev->vector_status[i] == HCLGE_INVALID_VPORT) { | |
2828 | vector->vector = pci_irq_vector(hdev->pdev, i); | |
2829 | vector->io_addr = hdev->hw.io_base + | |
2830 | HCLGE_VECTOR_REG_BASE + | |
2831 | (i - 1) * HCLGE_VECTOR_REG_OFFSET + | |
2832 | vport->vport_id * | |
2833 | HCLGE_VECTOR_VF_OFFSET; | |
2834 | hdev->vector_status[i] = vport->vport_id; | |
887c3820 | 2835 | hdev->vector_irq[i] = vector->vector; |
46a3df9f S |
2836 | |
2837 | vector++; | |
2838 | alloc++; | |
2839 | ||
2840 | break; | |
2841 | } | |
2842 | } | |
2843 | } | |
2844 | hdev->num_msi_left -= alloc; | |
2845 | hdev->num_msi_used += alloc; | |
2846 | ||
2847 | return alloc; | |
2848 | } | |
2849 | ||
2850 | static int hclge_get_vector_index(struct hclge_dev *hdev, int vector) | |
2851 | { | |
2852 | int i; | |
2853 | ||
887c3820 SM |
2854 | for (i = 0; i < hdev->num_msi; i++) |
2855 | if (vector == hdev->vector_irq[i]) | |
2856 | return i; | |
2857 | ||
46a3df9f S |
2858 | return -EINVAL; |
2859 | } | |
2860 | ||
2861 | static u32 hclge_get_rss_key_size(struct hnae3_handle *handle) | |
2862 | { | |
2863 | return HCLGE_RSS_KEY_SIZE; | |
2864 | } | |
2865 | ||
2866 | static u32 hclge_get_rss_indir_size(struct hnae3_handle *handle) | |
2867 | { | |
2868 | return HCLGE_RSS_IND_TBL_SIZE; | |
2869 | } | |
2870 | ||
2871 | static int hclge_get_rss_algo(struct hclge_dev *hdev) | |
2872 | { | |
d44f9b63 | 2873 | struct hclge_rss_config_cmd *req; |
46a3df9f S |
2874 | struct hclge_desc desc; |
2875 | int rss_hash_algo; | |
2876 | int ret; | |
2877 | ||
2878 | hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_GENERIC_CONFIG, true); | |
2879 | ||
2880 | ret = hclge_cmd_send(&hdev->hw, &desc, 1); | |
2881 | if (ret) { | |
2882 | dev_err(&hdev->pdev->dev, | |
2883 | "Get link status error, status =%d\n", ret); | |
2884 | return ret; | |
2885 | } | |
2886 | ||
d44f9b63 | 2887 | req = (struct hclge_rss_config_cmd *)desc.data; |
46a3df9f S |
2888 | rss_hash_algo = (req->hash_config & HCLGE_RSS_HASH_ALGO_MASK); |
2889 | ||
2890 | if (rss_hash_algo == HCLGE_RSS_HASH_ALGO_TOEPLITZ) | |
2891 | return ETH_RSS_HASH_TOP; | |
2892 | ||
2893 | return -EINVAL; | |
2894 | } | |
2895 | ||
2896 | static int hclge_set_rss_algo_key(struct hclge_dev *hdev, | |
2897 | const u8 hfunc, const u8 *key) | |
2898 | { | |
d44f9b63 | 2899 | struct hclge_rss_config_cmd *req; |
46a3df9f S |
2900 | struct hclge_desc desc; |
2901 | int key_offset; | |
2902 | int key_size; | |
2903 | int ret; | |
2904 | ||
d44f9b63 | 2905 | req = (struct hclge_rss_config_cmd *)desc.data; |
46a3df9f S |
2906 | |
2907 | for (key_offset = 0; key_offset < 3; key_offset++) { | |
2908 | hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_GENERIC_CONFIG, | |
2909 | false); | |
2910 | ||
2911 | req->hash_config |= (hfunc & HCLGE_RSS_HASH_ALGO_MASK); | |
2912 | req->hash_config |= (key_offset << HCLGE_RSS_HASH_KEY_OFFSET_B); | |
2913 | ||
2914 | if (key_offset == 2) | |
2915 | key_size = | |
2916 | HCLGE_RSS_KEY_SIZE - HCLGE_RSS_HASH_KEY_NUM * 2; | |
2917 | else | |
2918 | key_size = HCLGE_RSS_HASH_KEY_NUM; | |
2919 | ||
2920 | memcpy(req->hash_key, | |
2921 | key + key_offset * HCLGE_RSS_HASH_KEY_NUM, key_size); | |
2922 | ||
2923 | ret = hclge_cmd_send(&hdev->hw, &desc, 1); | |
2924 | if (ret) { | |
2925 | dev_err(&hdev->pdev->dev, | |
2926 | "Configure RSS config fail, status = %d\n", | |
2927 | ret); | |
2928 | return ret; | |
2929 | } | |
2930 | } | |
2931 | return 0; | |
2932 | } | |
2933 | ||
2934 | static int hclge_set_rss_indir_table(struct hclge_dev *hdev, const u32 *indir) | |
2935 | { | |
d44f9b63 | 2936 | struct hclge_rss_indirection_table_cmd *req; |
46a3df9f S |
2937 | struct hclge_desc desc; |
2938 | int i, j; | |
2939 | int ret; | |
2940 | ||
d44f9b63 | 2941 | req = (struct hclge_rss_indirection_table_cmd *)desc.data; |
46a3df9f S |
2942 | |
2943 | for (i = 0; i < HCLGE_RSS_CFG_TBL_NUM; i++) { | |
2944 | hclge_cmd_setup_basic_desc | |
2945 | (&desc, HCLGE_OPC_RSS_INDIR_TABLE, false); | |
2946 | ||
a90bb9a5 YL |
2947 | req->start_table_index = |
2948 | cpu_to_le16(i * HCLGE_RSS_CFG_TBL_SIZE); | |
2949 | req->rss_set_bitmap = cpu_to_le16(HCLGE_RSS_SET_BITMAP_MSK); | |
46a3df9f S |
2950 | |
2951 | for (j = 0; j < HCLGE_RSS_CFG_TBL_SIZE; j++) | |
2952 | req->rss_result[j] = | |
2953 | indir[i * HCLGE_RSS_CFG_TBL_SIZE + j]; | |
2954 | ||
2955 | ret = hclge_cmd_send(&hdev->hw, &desc, 1); | |
2956 | if (ret) { | |
2957 | dev_err(&hdev->pdev->dev, | |
2958 | "Configure rss indir table fail,status = %d\n", | |
2959 | ret); | |
2960 | return ret; | |
2961 | } | |
2962 | } | |
2963 | return 0; | |
2964 | } | |
2965 | ||
2966 | static int hclge_set_rss_tc_mode(struct hclge_dev *hdev, u16 *tc_valid, | |
2967 | u16 *tc_size, u16 *tc_offset) | |
2968 | { | |
d44f9b63 | 2969 | struct hclge_rss_tc_mode_cmd *req; |
46a3df9f S |
2970 | struct hclge_desc desc; |
2971 | int ret; | |
2972 | int i; | |
2973 | ||
2974 | hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_TC_MODE, false); | |
d44f9b63 | 2975 | req = (struct hclge_rss_tc_mode_cmd *)desc.data; |
46a3df9f S |
2976 | |
2977 | for (i = 0; i < HCLGE_MAX_TC_NUM; i++) { | |
a90bb9a5 YL |
2978 | u16 mode = 0; |
2979 | ||
2980 | hnae_set_bit(mode, HCLGE_RSS_TC_VALID_B, (tc_valid[i] & 0x1)); | |
2981 | hnae_set_field(mode, HCLGE_RSS_TC_SIZE_M, | |
46a3df9f | 2982 | HCLGE_RSS_TC_SIZE_S, tc_size[i]); |
a90bb9a5 | 2983 | hnae_set_field(mode, HCLGE_RSS_TC_OFFSET_M, |
46a3df9f | 2984 | HCLGE_RSS_TC_OFFSET_S, tc_offset[i]); |
a90bb9a5 YL |
2985 | |
2986 | req->rss_tc_mode[i] = cpu_to_le16(mode); | |
46a3df9f S |
2987 | } |
2988 | ||
2989 | ret = hclge_cmd_send(&hdev->hw, &desc, 1); | |
2990 | if (ret) { | |
2991 | dev_err(&hdev->pdev->dev, | |
2992 | "Configure rss tc mode fail, status = %d\n", ret); | |
2993 | return ret; | |
2994 | } | |
2995 | ||
2996 | return 0; | |
2997 | } | |
2998 | ||
2999 | static int hclge_set_rss_input_tuple(struct hclge_dev *hdev) | |
3000 | { | |
d44f9b63 | 3001 | struct hclge_rss_input_tuple_cmd *req; |
46a3df9f S |
3002 | struct hclge_desc desc; |
3003 | int ret; | |
3004 | ||
3005 | hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_INPUT_TUPLE, false); | |
3006 | ||
d44f9b63 | 3007 | req = (struct hclge_rss_input_tuple_cmd *)desc.data; |
46a3df9f S |
3008 | req->ipv4_tcp_en = HCLGE_RSS_INPUT_TUPLE_OTHER; |
3009 | req->ipv4_udp_en = HCLGE_RSS_INPUT_TUPLE_OTHER; | |
3010 | req->ipv4_sctp_en = HCLGE_RSS_INPUT_TUPLE_SCTP; | |
3011 | req->ipv4_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER; | |
3012 | req->ipv6_tcp_en = HCLGE_RSS_INPUT_TUPLE_OTHER; | |
3013 | req->ipv6_udp_en = HCLGE_RSS_INPUT_TUPLE_OTHER; | |
3014 | req->ipv6_sctp_en = HCLGE_RSS_INPUT_TUPLE_SCTP; | |
3015 | req->ipv6_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER; | |
3016 | ret = hclge_cmd_send(&hdev->hw, &desc, 1); | |
3017 | if (ret) { | |
3018 | dev_err(&hdev->pdev->dev, | |
3019 | "Configure rss input fail, status = %d\n", ret); | |
3020 | return ret; | |
3021 | } | |
3022 | ||
3023 | return 0; | |
3024 | } | |
3025 | ||
3026 | static int hclge_get_rss(struct hnae3_handle *handle, u32 *indir, | |
3027 | u8 *key, u8 *hfunc) | |
3028 | { | |
3029 | struct hclge_vport *vport = hclge_get_vport(handle); | |
3030 | struct hclge_dev *hdev = vport->back; | |
3031 | int i; | |
3032 | ||
3033 | /* Get hash algorithm */ | |
3034 | if (hfunc) | |
3035 | *hfunc = hclge_get_rss_algo(hdev); | |
3036 | ||
3037 | /* Get the RSS Key required by the user */ | |
3038 | if (key) | |
3039 | memcpy(key, vport->rss_hash_key, HCLGE_RSS_KEY_SIZE); | |
3040 | ||
3041 | /* Get indirect table */ | |
3042 | if (indir) | |
3043 | for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++) | |
3044 | indir[i] = vport->rss_indirection_tbl[i]; | |
3045 | ||
3046 | return 0; | |
3047 | } | |
3048 | ||
3049 | static int hclge_set_rss(struct hnae3_handle *handle, const u32 *indir, | |
3050 | const u8 *key, const u8 hfunc) | |
3051 | { | |
3052 | struct hclge_vport *vport = hclge_get_vport(handle); | |
3053 | struct hclge_dev *hdev = vport->back; | |
3054 | u8 hash_algo; | |
3055 | int ret, i; | |
3056 | ||
3057 | /* Set the RSS Hash Key if specififed by the user */ | |
3058 | if (key) { | |
3059 | /* Update the shadow RSS key with user specified qids */ | |
3060 | memcpy(vport->rss_hash_key, key, HCLGE_RSS_KEY_SIZE); | |
3061 | ||
3062 | if (hfunc == ETH_RSS_HASH_TOP || | |
3063 | hfunc == ETH_RSS_HASH_NO_CHANGE) | |
3064 | hash_algo = HCLGE_RSS_HASH_ALGO_TOEPLITZ; | |
3065 | else | |
3066 | return -EINVAL; | |
3067 | ret = hclge_set_rss_algo_key(hdev, hash_algo, key); | |
3068 | if (ret) | |
3069 | return ret; | |
3070 | } | |
3071 | ||
3072 | /* Update the shadow RSS table with user specified qids */ | |
3073 | for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++) | |
3074 | vport->rss_indirection_tbl[i] = indir[i]; | |
3075 | ||
3076 | /* Update the hardware */ | |
3077 | ret = hclge_set_rss_indir_table(hdev, indir); | |
3078 | return ret; | |
3079 | } | |
3080 | ||
f7db940a L |
3081 | static u8 hclge_get_rss_hash_bits(struct ethtool_rxnfc *nfc) |
3082 | { | |
3083 | u8 hash_sets = nfc->data & RXH_L4_B_0_1 ? HCLGE_S_PORT_BIT : 0; | |
3084 | ||
3085 | if (nfc->data & RXH_L4_B_2_3) | |
3086 | hash_sets |= HCLGE_D_PORT_BIT; | |
3087 | else | |
3088 | hash_sets &= ~HCLGE_D_PORT_BIT; | |
3089 | ||
3090 | if (nfc->data & RXH_IP_SRC) | |
3091 | hash_sets |= HCLGE_S_IP_BIT; | |
3092 | else | |
3093 | hash_sets &= ~HCLGE_S_IP_BIT; | |
3094 | ||
3095 | if (nfc->data & RXH_IP_DST) | |
3096 | hash_sets |= HCLGE_D_IP_BIT; | |
3097 | else | |
3098 | hash_sets &= ~HCLGE_D_IP_BIT; | |
3099 | ||
3100 | if (nfc->flow_type == SCTP_V4_FLOW || nfc->flow_type == SCTP_V6_FLOW) | |
3101 | hash_sets |= HCLGE_V_TAG_BIT; | |
3102 | ||
3103 | return hash_sets; | |
3104 | } | |
3105 | ||
3106 | static int hclge_set_rss_tuple(struct hnae3_handle *handle, | |
3107 | struct ethtool_rxnfc *nfc) | |
3108 | { | |
3109 | struct hclge_vport *vport = hclge_get_vport(handle); | |
3110 | struct hclge_dev *hdev = vport->back; | |
3111 | struct hclge_rss_input_tuple_cmd *req; | |
3112 | struct hclge_desc desc; | |
3113 | u8 tuple_sets; | |
3114 | int ret; | |
3115 | ||
3116 | if (nfc->data & ~(RXH_IP_SRC | RXH_IP_DST | | |
3117 | RXH_L4_B_0_1 | RXH_L4_B_2_3)) | |
3118 | return -EINVAL; | |
3119 | ||
3120 | req = (struct hclge_rss_input_tuple_cmd *)desc.data; | |
3121 | hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_INPUT_TUPLE, true); | |
3122 | ret = hclge_cmd_send(&hdev->hw, &desc, 1); | |
3123 | if (ret) { | |
3124 | dev_err(&hdev->pdev->dev, | |
3125 | "Read rss tuple fail, status = %d\n", ret); | |
3126 | return ret; | |
3127 | } | |
3128 | ||
3129 | hclge_cmd_reuse_desc(&desc, false); | |
3130 | ||
3131 | tuple_sets = hclge_get_rss_hash_bits(nfc); | |
3132 | switch (nfc->flow_type) { | |
3133 | case TCP_V4_FLOW: | |
3134 | req->ipv4_tcp_en = tuple_sets; | |
3135 | break; | |
3136 | case TCP_V6_FLOW: | |
3137 | req->ipv6_tcp_en = tuple_sets; | |
3138 | break; | |
3139 | case UDP_V4_FLOW: | |
3140 | req->ipv4_udp_en = tuple_sets; | |
3141 | break; | |
3142 | case UDP_V6_FLOW: | |
3143 | req->ipv6_udp_en = tuple_sets; | |
3144 | break; | |
3145 | case SCTP_V4_FLOW: | |
3146 | req->ipv4_sctp_en = tuple_sets; | |
3147 | break; | |
3148 | case SCTP_V6_FLOW: | |
3149 | if ((nfc->data & RXH_L4_B_0_1) || | |
3150 | (nfc->data & RXH_L4_B_2_3)) | |
3151 | return -EINVAL; | |
3152 | ||
3153 | req->ipv6_sctp_en = tuple_sets; | |
3154 | break; | |
3155 | case IPV4_FLOW: | |
3156 | req->ipv4_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER; | |
3157 | break; | |
3158 | case IPV6_FLOW: | |
3159 | req->ipv6_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER; | |
3160 | break; | |
3161 | default: | |
3162 | return -EINVAL; | |
3163 | } | |
3164 | ||
3165 | ret = hclge_cmd_send(&hdev->hw, &desc, 1); | |
3166 | if (ret) | |
3167 | dev_err(&hdev->pdev->dev, | |
3168 | "Set rss tuple fail, status = %d\n", ret); | |
3169 | ||
3170 | return ret; | |
3171 | } | |
3172 | ||
07d29954 L |
3173 | static int hclge_get_rss_tuple(struct hnae3_handle *handle, |
3174 | struct ethtool_rxnfc *nfc) | |
3175 | { | |
3176 | struct hclge_vport *vport = hclge_get_vport(handle); | |
3177 | struct hclge_dev *hdev = vport->back; | |
3178 | struct hclge_rss_input_tuple_cmd *req; | |
3179 | struct hclge_desc desc; | |
3180 | u8 tuple_sets; | |
3181 | int ret; | |
3182 | ||
3183 | nfc->data = 0; | |
3184 | ||
3185 | req = (struct hclge_rss_input_tuple_cmd *)desc.data; | |
3186 | hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_INPUT_TUPLE, true); | |
3187 | ret = hclge_cmd_send(&hdev->hw, &desc, 1); | |
3188 | if (ret) { | |
3189 | dev_err(&hdev->pdev->dev, | |
3190 | "Read rss tuple fail, status = %d\n", ret); | |
3191 | return ret; | |
3192 | } | |
3193 | ||
3194 | switch (nfc->flow_type) { | |
3195 | case TCP_V4_FLOW: | |
3196 | tuple_sets = req->ipv4_tcp_en; | |
3197 | break; | |
3198 | case UDP_V4_FLOW: | |
3199 | tuple_sets = req->ipv4_udp_en; | |
3200 | break; | |
3201 | case TCP_V6_FLOW: | |
3202 | tuple_sets = req->ipv6_tcp_en; | |
3203 | break; | |
3204 | case UDP_V6_FLOW: | |
3205 | tuple_sets = req->ipv6_udp_en; | |
3206 | break; | |
3207 | case SCTP_V4_FLOW: | |
3208 | tuple_sets = req->ipv4_sctp_en; | |
3209 | break; | |
3210 | case SCTP_V6_FLOW: | |
3211 | tuple_sets = req->ipv6_sctp_en; | |
3212 | break; | |
3213 | case IPV4_FLOW: | |
3214 | case IPV6_FLOW: | |
3215 | tuple_sets = HCLGE_S_IP_BIT | HCLGE_D_IP_BIT; | |
3216 | break; | |
3217 | default: | |
3218 | return -EINVAL; | |
3219 | } | |
3220 | ||
3221 | if (!tuple_sets) | |
3222 | return 0; | |
3223 | ||
3224 | if (tuple_sets & HCLGE_D_PORT_BIT) | |
3225 | nfc->data |= RXH_L4_B_2_3; | |
3226 | if (tuple_sets & HCLGE_S_PORT_BIT) | |
3227 | nfc->data |= RXH_L4_B_0_1; | |
3228 | if (tuple_sets & HCLGE_D_IP_BIT) | |
3229 | nfc->data |= RXH_IP_DST; | |
3230 | if (tuple_sets & HCLGE_S_IP_BIT) | |
3231 | nfc->data |= RXH_IP_SRC; | |
3232 | ||
3233 | return 0; | |
3234 | } | |
3235 | ||
46a3df9f S |
3236 | static int hclge_get_tc_size(struct hnae3_handle *handle) |
3237 | { | |
3238 | struct hclge_vport *vport = hclge_get_vport(handle); | |
3239 | struct hclge_dev *hdev = vport->back; | |
3240 | ||
3241 | return hdev->rss_size_max; | |
3242 | } | |
3243 | ||
77f255c1 | 3244 | int hclge_rss_init_hw(struct hclge_dev *hdev) |
46a3df9f S |
3245 | { |
3246 | const u8 hfunc = HCLGE_RSS_HASH_ALGO_TOEPLITZ; | |
3247 | struct hclge_vport *vport = hdev->vport; | |
3248 | u16 tc_offset[HCLGE_MAX_TC_NUM]; | |
3249 | u8 rss_key[HCLGE_RSS_KEY_SIZE]; | |
3250 | u16 tc_valid[HCLGE_MAX_TC_NUM]; | |
3251 | u16 tc_size[HCLGE_MAX_TC_NUM]; | |
3252 | u32 *rss_indir = NULL; | |
68ece54e | 3253 | u16 rss_size = 0, roundup_size; |
46a3df9f S |
3254 | const u8 *key; |
3255 | int i, ret, j; | |
3256 | ||
3257 | rss_indir = kcalloc(HCLGE_RSS_IND_TBL_SIZE, sizeof(u32), GFP_KERNEL); | |
3258 | if (!rss_indir) | |
3259 | return -ENOMEM; | |
3260 | ||
3261 | /* Get default RSS key */ | |
3262 | netdev_rss_key_fill(rss_key, HCLGE_RSS_KEY_SIZE); | |
3263 | ||
3264 | /* Initialize RSS indirect table for each vport */ | |
3265 | for (j = 0; j < hdev->num_vmdq_vport + 1; j++) { | |
3266 | for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++) { | |
3267 | vport[j].rss_indirection_tbl[i] = | |
68ece54e YL |
3268 | i % vport[j].alloc_rss_size; |
3269 | ||
3270 | /* vport 0 is for PF */ | |
3271 | if (j != 0) | |
3272 | continue; | |
3273 | ||
3274 | rss_size = vport[j].alloc_rss_size; | |
46a3df9f S |
3275 | rss_indir[i] = vport[j].rss_indirection_tbl[i]; |
3276 | } | |
3277 | } | |
3278 | ret = hclge_set_rss_indir_table(hdev, rss_indir); | |
3279 | if (ret) | |
3280 | goto err; | |
3281 | ||
3282 | key = rss_key; | |
3283 | ret = hclge_set_rss_algo_key(hdev, hfunc, key); | |
3284 | if (ret) | |
3285 | goto err; | |
3286 | ||
3287 | ret = hclge_set_rss_input_tuple(hdev); | |
3288 | if (ret) | |
3289 | goto err; | |
3290 | ||
68ece54e YL |
3291 | /* Each TC have the same queue size, and tc_size set to hardware is |
3292 | * the log2 of roundup power of two of rss_size, the acutal queue | |
3293 | * size is limited by indirection table. | |
3294 | */ | |
3295 | if (rss_size > HCLGE_RSS_TC_SIZE_7 || rss_size == 0) { | |
3296 | dev_err(&hdev->pdev->dev, | |
3297 | "Configure rss tc size failed, invalid TC_SIZE = %d\n", | |
3298 | rss_size); | |
81359617 CJ |
3299 | ret = -EINVAL; |
3300 | goto err; | |
68ece54e YL |
3301 | } |
3302 | ||
3303 | roundup_size = roundup_pow_of_two(rss_size); | |
3304 | roundup_size = ilog2(roundup_size); | |
3305 | ||
46a3df9f | 3306 | for (i = 0; i < HCLGE_MAX_TC_NUM; i++) { |
68ece54e | 3307 | tc_valid[i] = 0; |
46a3df9f | 3308 | |
68ece54e YL |
3309 | if (!(hdev->hw_tc_map & BIT(i))) |
3310 | continue; | |
3311 | ||
3312 | tc_valid[i] = 1; | |
3313 | tc_size[i] = roundup_size; | |
3314 | tc_offset[i] = rss_size * i; | |
46a3df9f | 3315 | } |
68ece54e | 3316 | |
46a3df9f S |
3317 | ret = hclge_set_rss_tc_mode(hdev, tc_valid, tc_size, tc_offset); |
3318 | ||
3319 | err: | |
3320 | kfree(rss_indir); | |
3321 | ||
3322 | return ret; | |
3323 | } | |
3324 | ||
63d7e66f SM |
3325 | int hclge_bind_ring_with_vector(struct hclge_vport *vport, |
3326 | int vector_id, bool en, | |
3327 | struct hnae3_ring_chain_node *ring_chain) | |
46a3df9f S |
3328 | { |
3329 | struct hclge_dev *hdev = vport->back; | |
46a3df9f S |
3330 | struct hnae3_ring_chain_node *node; |
3331 | struct hclge_desc desc; | |
63d7e66f SM |
3332 | struct hclge_ctrl_vector_chain_cmd *req |
3333 | = (struct hclge_ctrl_vector_chain_cmd *)desc.data; | |
3334 | enum hclge_cmd_status status; | |
3335 | enum hclge_opcode_type op; | |
3336 | u16 tqp_type_and_id; | |
46a3df9f S |
3337 | int i; |
3338 | ||
63d7e66f SM |
3339 | op = en ? HCLGE_OPC_ADD_RING_TO_VECTOR : HCLGE_OPC_DEL_RING_TO_VECTOR; |
3340 | hclge_cmd_setup_basic_desc(&desc, op, false); | |
46a3df9f S |
3341 | req->int_vector_id = vector_id; |
3342 | ||
3343 | i = 0; | |
3344 | for (node = ring_chain; node; node = node->next) { | |
63d7e66f SM |
3345 | tqp_type_and_id = le16_to_cpu(req->tqp_type_and_id[i]); |
3346 | hnae_set_field(tqp_type_and_id, HCLGE_INT_TYPE_M, | |
3347 | HCLGE_INT_TYPE_S, | |
46a3df9f | 3348 | hnae_get_bit(node->flag, HNAE3_RING_TYPE_B)); |
63d7e66f SM |
3349 | hnae_set_field(tqp_type_and_id, HCLGE_TQP_ID_M, |
3350 | HCLGE_TQP_ID_S, node->tqp_index); | |
3351 | req->tqp_type_and_id[i] = cpu_to_le16(tqp_type_and_id); | |
46a3df9f S |
3352 | if (++i >= HCLGE_VECTOR_ELEMENTS_PER_CMD) { |
3353 | req->int_cause_num = HCLGE_VECTOR_ELEMENTS_PER_CMD; | |
63d7e66f | 3354 | req->vfid = vport->vport_id; |
46a3df9f | 3355 | |
63d7e66f SM |
3356 | status = hclge_cmd_send(&hdev->hw, &desc, 1); |
3357 | if (status) { | |
46a3df9f S |
3358 | dev_err(&hdev->pdev->dev, |
3359 | "Map TQP fail, status is %d.\n", | |
63d7e66f SM |
3360 | status); |
3361 | return -EIO; | |
46a3df9f S |
3362 | } |
3363 | i = 0; | |
3364 | ||
3365 | hclge_cmd_setup_basic_desc(&desc, | |
63d7e66f | 3366 | op, |
46a3df9f S |
3367 | false); |
3368 | req->int_vector_id = vector_id; | |
3369 | } | |
3370 | } | |
3371 | ||
3372 | if (i > 0) { | |
3373 | req->int_cause_num = i; | |
63d7e66f SM |
3374 | req->vfid = vport->vport_id; |
3375 | status = hclge_cmd_send(&hdev->hw, &desc, 1); | |
3376 | if (status) { | |
46a3df9f | 3377 | dev_err(&hdev->pdev->dev, |
63d7e66f SM |
3378 | "Map TQP fail, status is %d.\n", status); |
3379 | return -EIO; | |
46a3df9f S |
3380 | } |
3381 | } | |
3382 | ||
3383 | return 0; | |
3384 | } | |
3385 | ||
63d7e66f SM |
3386 | static int hclge_map_ring_to_vector(struct hnae3_handle *handle, |
3387 | int vector, | |
3388 | struct hnae3_ring_chain_node *ring_chain) | |
46a3df9f S |
3389 | { |
3390 | struct hclge_vport *vport = hclge_get_vport(handle); | |
3391 | struct hclge_dev *hdev = vport->back; | |
3392 | int vector_id; | |
3393 | ||
3394 | vector_id = hclge_get_vector_index(hdev, vector); | |
3395 | if (vector_id < 0) { | |
3396 | dev_err(&hdev->pdev->dev, | |
63d7e66f | 3397 | "Get vector index fail. vector_id =%d\n", vector_id); |
46a3df9f S |
3398 | return vector_id; |
3399 | } | |
3400 | ||
63d7e66f | 3401 | return hclge_bind_ring_with_vector(vport, vector_id, true, ring_chain); |
46a3df9f S |
3402 | } |
3403 | ||
63d7e66f SM |
3404 | static int hclge_unmap_ring_frm_vector(struct hnae3_handle *handle, |
3405 | int vector, | |
3406 | struct hnae3_ring_chain_node *ring_chain) | |
46a3df9f S |
3407 | { |
3408 | struct hclge_vport *vport = hclge_get_vport(handle); | |
3409 | struct hclge_dev *hdev = vport->back; | |
63d7e66f | 3410 | int vector_id, ret; |
46a3df9f S |
3411 | |
3412 | vector_id = hclge_get_vector_index(hdev, vector); | |
3413 | if (vector_id < 0) { | |
3414 | dev_err(&handle->pdev->dev, | |
3415 | "Get vector index fail. ret =%d\n", vector_id); | |
3416 | return vector_id; | |
3417 | } | |
3418 | ||
63d7e66f SM |
3419 | ret = hclge_bind_ring_with_vector(vport, vector_id, false, ring_chain); |
3420 | if (ret) { | |
3421 | dev_err(&handle->pdev->dev, | |
3422 | "Unmap ring from vector fail. vectorid=%d, ret =%d\n", | |
3423 | vector_id, | |
3424 | ret); | |
3425 | return ret; | |
46a3df9f S |
3426 | } |
3427 | ||
63d7e66f SM |
3428 | /* Free this MSIX or MSI vector */ |
3429 | hclge_free_vector(hdev, vector_id); | |
46a3df9f S |
3430 | |
3431 | return 0; | |
3432 | } | |
3433 | ||
3434 | int hclge_cmd_set_promisc_mode(struct hclge_dev *hdev, | |
3435 | struct hclge_promisc_param *param) | |
3436 | { | |
d44f9b63 | 3437 | struct hclge_promisc_cfg_cmd *req; |
46a3df9f S |
3438 | struct hclge_desc desc; |
3439 | int ret; | |
3440 | ||
3441 | hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_PROMISC_MODE, false); | |
3442 | ||
d44f9b63 | 3443 | req = (struct hclge_promisc_cfg_cmd *)desc.data; |
46a3df9f S |
3444 | req->vf_id = param->vf_id; |
3445 | req->flag = (param->enable << HCLGE_PROMISC_EN_B); | |
3446 | ||
3447 | ret = hclge_cmd_send(&hdev->hw, &desc, 1); | |
3448 | if (ret) { | |
3449 | dev_err(&hdev->pdev->dev, | |
3450 | "Set promisc mode fail, status is %d.\n", ret); | |
3451 | return ret; | |
3452 | } | |
3453 | return 0; | |
3454 | } | |
3455 | ||
3456 | void hclge_promisc_param_init(struct hclge_promisc_param *param, bool en_uc, | |
3457 | bool en_mc, bool en_bc, int vport_id) | |
3458 | { | |
3459 | if (!param) | |
3460 | return; | |
3461 | ||
3462 | memset(param, 0, sizeof(struct hclge_promisc_param)); | |
3463 | if (en_uc) | |
3464 | param->enable = HCLGE_PROMISC_EN_UC; | |
3465 | if (en_mc) | |
3466 | param->enable |= HCLGE_PROMISC_EN_MC; | |
3467 | if (en_bc) | |
3468 | param->enable |= HCLGE_PROMISC_EN_BC; | |
3469 | param->vf_id = vport_id; | |
3470 | } | |
3471 | ||
3472 | static void hclge_set_promisc_mode(struct hnae3_handle *handle, u32 en) | |
3473 | { | |
3474 | struct hclge_vport *vport = hclge_get_vport(handle); | |
3475 | struct hclge_dev *hdev = vport->back; | |
3476 | struct hclge_promisc_param param; | |
3477 | ||
3478 | hclge_promisc_param_init(¶m, en, en, true, vport->vport_id); | |
3479 | hclge_cmd_set_promisc_mode(hdev, ¶m); | |
3480 | } | |
3481 | ||
3482 | static void hclge_cfg_mac_mode(struct hclge_dev *hdev, bool enable) | |
3483 | { | |
3484 | struct hclge_desc desc; | |
d44f9b63 YL |
3485 | struct hclge_config_mac_mode_cmd *req = |
3486 | (struct hclge_config_mac_mode_cmd *)desc.data; | |
a90bb9a5 | 3487 | u32 loop_en = 0; |
46a3df9f S |
3488 | int ret; |
3489 | ||
3490 | hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, false); | |
a90bb9a5 YL |
3491 | hnae_set_bit(loop_en, HCLGE_MAC_TX_EN_B, enable); |
3492 | hnae_set_bit(loop_en, HCLGE_MAC_RX_EN_B, enable); | |
3493 | hnae_set_bit(loop_en, HCLGE_MAC_PAD_TX_B, enable); | |
3494 | hnae_set_bit(loop_en, HCLGE_MAC_PAD_RX_B, enable); | |
3495 | hnae_set_bit(loop_en, HCLGE_MAC_1588_TX_B, 0); | |
3496 | hnae_set_bit(loop_en, HCLGE_MAC_1588_RX_B, 0); | |
3497 | hnae_set_bit(loop_en, HCLGE_MAC_APP_LP_B, 0); | |
3498 | hnae_set_bit(loop_en, HCLGE_MAC_LINE_LP_B, 0); | |
3499 | hnae_set_bit(loop_en, HCLGE_MAC_FCS_TX_B, enable); | |
3500 | hnae_set_bit(loop_en, HCLGE_MAC_RX_FCS_B, enable); | |
3501 | hnae_set_bit(loop_en, HCLGE_MAC_RX_FCS_STRIP_B, enable); | |
3502 | hnae_set_bit(loop_en, HCLGE_MAC_TX_OVERSIZE_TRUNCATE_B, enable); | |
3503 | hnae_set_bit(loop_en, HCLGE_MAC_RX_OVERSIZE_TRUNCATE_B, enable); | |
3504 | hnae_set_bit(loop_en, HCLGE_MAC_TX_UNDER_MIN_ERR_B, enable); | |
3505 | req->txrx_pad_fcs_loop_en = cpu_to_le32(loop_en); | |
46a3df9f S |
3506 | |
3507 | ret = hclge_cmd_send(&hdev->hw, &desc, 1); | |
3508 | if (ret) | |
3509 | dev_err(&hdev->pdev->dev, | |
3510 | "mac enable fail, ret =%d.\n", ret); | |
3511 | } | |
3512 | ||
c39c4d98 YL |
3513 | static int hclge_set_loopback(struct hnae3_handle *handle, |
3514 | enum hnae3_loop loop_mode, bool en) | |
3515 | { | |
3516 | struct hclge_vport *vport = hclge_get_vport(handle); | |
3517 | struct hclge_config_mac_mode_cmd *req; | |
3518 | struct hclge_dev *hdev = vport->back; | |
3519 | struct hclge_desc desc; | |
3520 | u32 loop_en; | |
3521 | int ret; | |
3522 | ||
3523 | switch (loop_mode) { | |
3524 | case HNAE3_MAC_INTER_LOOP_MAC: | |
3525 | req = (struct hclge_config_mac_mode_cmd *)&desc.data[0]; | |
3526 | /* 1 Read out the MAC mode config at first */ | |
3527 | hclge_cmd_setup_basic_desc(&desc, | |
3528 | HCLGE_OPC_CONFIG_MAC_MODE, | |
3529 | true); | |
3530 | ret = hclge_cmd_send(&hdev->hw, &desc, 1); | |
3531 | if (ret) { | |
3532 | dev_err(&hdev->pdev->dev, | |
3533 | "mac loopback get fail, ret =%d.\n", | |
3534 | ret); | |
3535 | return ret; | |
3536 | } | |
3537 | ||
3538 | /* 2 Then setup the loopback flag */ | |
3539 | loop_en = le32_to_cpu(req->txrx_pad_fcs_loop_en); | |
3540 | if (en) | |
3541 | hnae_set_bit(loop_en, HCLGE_MAC_APP_LP_B, 1); | |
3542 | else | |
3543 | hnae_set_bit(loop_en, HCLGE_MAC_APP_LP_B, 0); | |
3544 | ||
3545 | req->txrx_pad_fcs_loop_en = cpu_to_le32(loop_en); | |
3546 | ||
3547 | /* 3 Config mac work mode with loopback flag | |
3548 | * and its original configure parameters | |
3549 | */ | |
3550 | hclge_cmd_reuse_desc(&desc, false); | |
3551 | ret = hclge_cmd_send(&hdev->hw, &desc, 1); | |
3552 | if (ret) | |
3553 | dev_err(&hdev->pdev->dev, | |
3554 | "mac loopback set fail, ret =%d.\n", ret); | |
3555 | break; | |
3556 | default: | |
3557 | ret = -ENOTSUPP; | |
3558 | dev_err(&hdev->pdev->dev, | |
3559 | "loop_mode %d is not supported\n", loop_mode); | |
3560 | break; | |
3561 | } | |
3562 | ||
3563 | return ret; | |
3564 | } | |
3565 | ||
46a3df9f S |
3566 | static int hclge_tqp_enable(struct hclge_dev *hdev, int tqp_id, |
3567 | int stream_id, bool enable) | |
3568 | { | |
3569 | struct hclge_desc desc; | |
d44f9b63 YL |
3570 | struct hclge_cfg_com_tqp_queue_cmd *req = |
3571 | (struct hclge_cfg_com_tqp_queue_cmd *)desc.data; | |
46a3df9f S |
3572 | int ret; |
3573 | ||
3574 | hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_COM_TQP_QUEUE, false); | |
3575 | req->tqp_id = cpu_to_le16(tqp_id & HCLGE_RING_ID_MASK); | |
3576 | req->stream_id = cpu_to_le16(stream_id); | |
3577 | req->enable |= enable << HCLGE_TQP_ENABLE_B; | |
3578 | ||
3579 | ret = hclge_cmd_send(&hdev->hw, &desc, 1); | |
3580 | if (ret) | |
3581 | dev_err(&hdev->pdev->dev, | |
3582 | "Tqp enable fail, status =%d.\n", ret); | |
3583 | return ret; | |
3584 | } | |
3585 | ||
3586 | static void hclge_reset_tqp_stats(struct hnae3_handle *handle) | |
3587 | { | |
3588 | struct hclge_vport *vport = hclge_get_vport(handle); | |
3589 | struct hnae3_queue *queue; | |
3590 | struct hclge_tqp *tqp; | |
3591 | int i; | |
3592 | ||
3593 | for (i = 0; i < vport->alloc_tqps; i++) { | |
3594 | queue = handle->kinfo.tqp[i]; | |
3595 | tqp = container_of(queue, struct hclge_tqp, q); | |
3596 | memset(&tqp->tqp_stats, 0, sizeof(tqp->tqp_stats)); | |
3597 | } | |
3598 | } | |
3599 | ||
3600 | static int hclge_ae_start(struct hnae3_handle *handle) | |
3601 | { | |
3602 | struct hclge_vport *vport = hclge_get_vport(handle); | |
3603 | struct hclge_dev *hdev = vport->back; | |
3604 | int i, queue_id, ret; | |
3605 | ||
3606 | for (i = 0; i < vport->alloc_tqps; i++) { | |
3607 | /* todo clear interrupt */ | |
3608 | /* ring enable */ | |
3609 | queue_id = hclge_get_queue_id(handle->kinfo.tqp[i]); | |
3610 | if (queue_id < 0) { | |
3611 | dev_warn(&hdev->pdev->dev, | |
3612 | "Get invalid queue id, ignore it\n"); | |
3613 | continue; | |
3614 | } | |
3615 | ||
3616 | hclge_tqp_enable(hdev, queue_id, 0, true); | |
3617 | } | |
3618 | /* mac enable */ | |
3619 | hclge_cfg_mac_mode(hdev, true); | |
3620 | clear_bit(HCLGE_STATE_DOWN, &hdev->state); | |
d039ef68 | 3621 | mod_timer(&hdev->service_timer, jiffies + HZ); |
46a3df9f S |
3622 | |
3623 | ret = hclge_mac_start_phy(hdev); | |
3624 | if (ret) | |
3625 | return ret; | |
3626 | ||
3627 | /* reset tqp stats */ | |
3628 | hclge_reset_tqp_stats(handle); | |
3629 | ||
3630 | return 0; | |
3631 | } | |
3632 | ||
3633 | static void hclge_ae_stop(struct hnae3_handle *handle) | |
3634 | { | |
3635 | struct hclge_vport *vport = hclge_get_vport(handle); | |
3636 | struct hclge_dev *hdev = vport->back; | |
3637 | int i, queue_id; | |
3638 | ||
3639 | for (i = 0; i < vport->alloc_tqps; i++) { | |
3640 | /* Ring disable */ | |
3641 | queue_id = hclge_get_queue_id(handle->kinfo.tqp[i]); | |
3642 | if (queue_id < 0) { | |
3643 | dev_warn(&hdev->pdev->dev, | |
3644 | "Get invalid queue id, ignore it\n"); | |
3645 | continue; | |
3646 | } | |
3647 | ||
3648 | hclge_tqp_enable(hdev, queue_id, 0, false); | |
3649 | } | |
3650 | /* Mac disable */ | |
3651 | hclge_cfg_mac_mode(hdev, false); | |
3652 | ||
3653 | hclge_mac_stop_phy(hdev); | |
3654 | ||
3655 | /* reset tqp stats */ | |
3656 | hclge_reset_tqp_stats(handle); | |
3657 | } | |
3658 | ||
3659 | static int hclge_get_mac_vlan_cmd_status(struct hclge_vport *vport, | |
3660 | u16 cmdq_resp, u8 resp_code, | |
3661 | enum hclge_mac_vlan_tbl_opcode op) | |
3662 | { | |
3663 | struct hclge_dev *hdev = vport->back; | |
3664 | int return_status = -EIO; | |
3665 | ||
3666 | if (cmdq_resp) { | |
3667 | dev_err(&hdev->pdev->dev, | |
3668 | "cmdq execute failed for get_mac_vlan_cmd_status,status=%d.\n", | |
3669 | cmdq_resp); | |
3670 | return -EIO; | |
3671 | } | |
3672 | ||
3673 | if (op == HCLGE_MAC_VLAN_ADD) { | |
3674 | if ((!resp_code) || (resp_code == 1)) { | |
3675 | return_status = 0; | |
3676 | } else if (resp_code == 2) { | |
3677 | return_status = -EIO; | |
3678 | dev_err(&hdev->pdev->dev, | |
3679 | "add mac addr failed for uc_overflow.\n"); | |
3680 | } else if (resp_code == 3) { | |
3681 | return_status = -EIO; | |
3682 | dev_err(&hdev->pdev->dev, | |
3683 | "add mac addr failed for mc_overflow.\n"); | |
3684 | } else { | |
3685 | dev_err(&hdev->pdev->dev, | |
3686 | "add mac addr failed for undefined, code=%d.\n", | |
3687 | resp_code); | |
3688 | } | |
3689 | } else if (op == HCLGE_MAC_VLAN_REMOVE) { | |
3690 | if (!resp_code) { | |
3691 | return_status = 0; | |
3692 | } else if (resp_code == 1) { | |
3693 | return_status = -EIO; | |
3694 | dev_dbg(&hdev->pdev->dev, | |
3695 | "remove mac addr failed for miss.\n"); | |
3696 | } else { | |
3697 | dev_err(&hdev->pdev->dev, | |
3698 | "remove mac addr failed for undefined, code=%d.\n", | |
3699 | resp_code); | |
3700 | } | |
3701 | } else if (op == HCLGE_MAC_VLAN_LKUP) { | |
3702 | if (!resp_code) { | |
3703 | return_status = 0; | |
3704 | } else if (resp_code == 1) { | |
3705 | return_status = -EIO; | |
3706 | dev_dbg(&hdev->pdev->dev, | |
3707 | "lookup mac addr failed for miss.\n"); | |
3708 | } else { | |
3709 | dev_err(&hdev->pdev->dev, | |
3710 | "lookup mac addr failed for undefined, code=%d.\n", | |
3711 | resp_code); | |
3712 | } | |
3713 | } else { | |
3714 | return_status = -EIO; | |
3715 | dev_err(&hdev->pdev->dev, | |
3716 | "unknown opcode for get_mac_vlan_cmd_status,opcode=%d.\n", | |
3717 | op); | |
3718 | } | |
3719 | ||
3720 | return return_status; | |
3721 | } | |
3722 | ||
3723 | static int hclge_update_desc_vfid(struct hclge_desc *desc, int vfid, bool clr) | |
3724 | { | |
3725 | int word_num; | |
3726 | int bit_num; | |
3727 | ||
3728 | if (vfid > 255 || vfid < 0) | |
3729 | return -EIO; | |
3730 | ||
3731 | if (vfid >= 0 && vfid <= 191) { | |
3732 | word_num = vfid / 32; | |
3733 | bit_num = vfid % 32; | |
3734 | if (clr) | |
a90bb9a5 | 3735 | desc[1].data[word_num] &= cpu_to_le32(~(1 << bit_num)); |
46a3df9f | 3736 | else |
a90bb9a5 | 3737 | desc[1].data[word_num] |= cpu_to_le32(1 << bit_num); |
46a3df9f S |
3738 | } else { |
3739 | word_num = (vfid - 192) / 32; | |
3740 | bit_num = vfid % 32; | |
3741 | if (clr) | |
a90bb9a5 | 3742 | desc[2].data[word_num] &= cpu_to_le32(~(1 << bit_num)); |
46a3df9f | 3743 | else |
a90bb9a5 | 3744 | desc[2].data[word_num] |= cpu_to_le32(1 << bit_num); |
46a3df9f S |
3745 | } |
3746 | ||
3747 | return 0; | |
3748 | } | |
3749 | ||
3750 | static bool hclge_is_all_function_id_zero(struct hclge_desc *desc) | |
3751 | { | |
3752 | #define HCLGE_DESC_NUMBER 3 | |
3753 | #define HCLGE_FUNC_NUMBER_PER_DESC 6 | |
3754 | int i, j; | |
3755 | ||
3756 | for (i = 0; i < HCLGE_DESC_NUMBER; i++) | |
3757 | for (j = 0; j < HCLGE_FUNC_NUMBER_PER_DESC; j++) | |
3758 | if (desc[i].data[j]) | |
3759 | return false; | |
3760 | ||
3761 | return true; | |
3762 | } | |
3763 | ||
d44f9b63 | 3764 | static void hclge_prepare_mac_addr(struct hclge_mac_vlan_tbl_entry_cmd *new_req, |
46a3df9f S |
3765 | const u8 *addr) |
3766 | { | |
3767 | const unsigned char *mac_addr = addr; | |
3768 | u32 high_val = mac_addr[2] << 16 | (mac_addr[3] << 24) | | |
3769 | (mac_addr[0]) | (mac_addr[1] << 8); | |
3770 | u32 low_val = mac_addr[4] | (mac_addr[5] << 8); | |
3771 | ||
3772 | new_req->mac_addr_hi32 = cpu_to_le32(high_val); | |
3773 | new_req->mac_addr_lo16 = cpu_to_le16(low_val & 0xffff); | |
3774 | } | |
3775 | ||
1db9b1bf YL |
3776 | static u16 hclge_get_mac_addr_to_mta_index(struct hclge_vport *vport, |
3777 | const u8 *addr) | |
46a3df9f S |
3778 | { |
3779 | u16 high_val = addr[1] | (addr[0] << 8); | |
3780 | struct hclge_dev *hdev = vport->back; | |
3781 | u32 rsh = 4 - hdev->mta_mac_sel_type; | |
3782 | u16 ret_val = (high_val >> rsh) & 0xfff; | |
3783 | ||
3784 | return ret_val; | |
3785 | } | |
3786 | ||
3787 | static int hclge_set_mta_filter_mode(struct hclge_dev *hdev, | |
3788 | enum hclge_mta_dmac_sel_type mta_mac_sel, | |
3789 | bool enable) | |
3790 | { | |
d44f9b63 | 3791 | struct hclge_mta_filter_mode_cmd *req; |
46a3df9f S |
3792 | struct hclge_desc desc; |
3793 | int ret; | |
3794 | ||
d44f9b63 | 3795 | req = (struct hclge_mta_filter_mode_cmd *)desc.data; |
46a3df9f S |
3796 | hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MTA_MAC_MODE_CFG, false); |
3797 | ||
3798 | hnae_set_bit(req->dmac_sel_en, HCLGE_CFG_MTA_MAC_EN_B, | |
3799 | enable); | |
3800 | hnae_set_field(req->dmac_sel_en, HCLGE_CFG_MTA_MAC_SEL_M, | |
3801 | HCLGE_CFG_MTA_MAC_SEL_S, mta_mac_sel); | |
3802 | ||
3803 | ret = hclge_cmd_send(&hdev->hw, &desc, 1); | |
3804 | if (ret) { | |
3805 | dev_err(&hdev->pdev->dev, | |
3806 | "Config mat filter mode failed for cmd_send, ret =%d.\n", | |
3807 | ret); | |
3808 | return ret; | |
3809 | } | |
3810 | ||
3811 | return 0; | |
3812 | } | |
3813 | ||
3814 | int hclge_cfg_func_mta_filter(struct hclge_dev *hdev, | |
3815 | u8 func_id, | |
3816 | bool enable) | |
3817 | { | |
d44f9b63 | 3818 | struct hclge_cfg_func_mta_filter_cmd *req; |
46a3df9f S |
3819 | struct hclge_desc desc; |
3820 | int ret; | |
3821 | ||
d44f9b63 | 3822 | req = (struct hclge_cfg_func_mta_filter_cmd *)desc.data; |
46a3df9f S |
3823 | hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MTA_MAC_FUNC_CFG, false); |
3824 | ||
3825 | hnae_set_bit(req->accept, HCLGE_CFG_FUNC_MTA_ACCEPT_B, | |
3826 | enable); | |
3827 | req->function_id = func_id; | |
3828 | ||
3829 | ret = hclge_cmd_send(&hdev->hw, &desc, 1); | |
3830 | if (ret) { | |
3831 | dev_err(&hdev->pdev->dev, | |
3832 | "Config func_id enable failed for cmd_send, ret =%d.\n", | |
3833 | ret); | |
3834 | return ret; | |
3835 | } | |
3836 | ||
3837 | return 0; | |
3838 | } | |
3839 | ||
3840 | static int hclge_set_mta_table_item(struct hclge_vport *vport, | |
3841 | u16 idx, | |
3842 | bool enable) | |
3843 | { | |
3844 | struct hclge_dev *hdev = vport->back; | |
d44f9b63 | 3845 | struct hclge_cfg_func_mta_item_cmd *req; |
46a3df9f | 3846 | struct hclge_desc desc; |
a90bb9a5 | 3847 | u16 item_idx = 0; |
46a3df9f S |
3848 | int ret; |
3849 | ||
d44f9b63 | 3850 | req = (struct hclge_cfg_func_mta_item_cmd *)desc.data; |
46a3df9f S |
3851 | hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MTA_TBL_ITEM_CFG, false); |
3852 | hnae_set_bit(req->accept, HCLGE_CFG_MTA_ITEM_ACCEPT_B, enable); | |
3853 | ||
a90bb9a5 | 3854 | hnae_set_field(item_idx, HCLGE_CFG_MTA_ITEM_IDX_M, |
46a3df9f | 3855 | HCLGE_CFG_MTA_ITEM_IDX_S, idx); |
a90bb9a5 | 3856 | req->item_idx = cpu_to_le16(item_idx); |
46a3df9f S |
3857 | |
3858 | ret = hclge_cmd_send(&hdev->hw, &desc, 1); | |
3859 | if (ret) { | |
3860 | dev_err(&hdev->pdev->dev, | |
3861 | "Config mta table item failed for cmd_send, ret =%d.\n", | |
3862 | ret); | |
3863 | return ret; | |
3864 | } | |
3865 | ||
3866 | return 0; | |
3867 | } | |
3868 | ||
3869 | static int hclge_remove_mac_vlan_tbl(struct hclge_vport *vport, | |
d44f9b63 | 3870 | struct hclge_mac_vlan_tbl_entry_cmd *req) |
46a3df9f S |
3871 | { |
3872 | struct hclge_dev *hdev = vport->back; | |
3873 | struct hclge_desc desc; | |
3874 | u8 resp_code; | |
a90bb9a5 | 3875 | u16 retval; |
46a3df9f S |
3876 | int ret; |
3877 | ||
3878 | hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_REMOVE, false); | |
3879 | ||
d44f9b63 | 3880 | memcpy(desc.data, req, sizeof(struct hclge_mac_vlan_tbl_entry_cmd)); |
46a3df9f S |
3881 | |
3882 | ret = hclge_cmd_send(&hdev->hw, &desc, 1); | |
3883 | if (ret) { | |
3884 | dev_err(&hdev->pdev->dev, | |
3885 | "del mac addr failed for cmd_send, ret =%d.\n", | |
3886 | ret); | |
3887 | return ret; | |
3888 | } | |
a90bb9a5 YL |
3889 | resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff; |
3890 | retval = le16_to_cpu(desc.retval); | |
46a3df9f | 3891 | |
a90bb9a5 | 3892 | return hclge_get_mac_vlan_cmd_status(vport, retval, resp_code, |
46a3df9f S |
3893 | HCLGE_MAC_VLAN_REMOVE); |
3894 | } | |
3895 | ||
3896 | static int hclge_lookup_mac_vlan_tbl(struct hclge_vport *vport, | |
d44f9b63 | 3897 | struct hclge_mac_vlan_tbl_entry_cmd *req, |
46a3df9f S |
3898 | struct hclge_desc *desc, |
3899 | bool is_mc) | |
3900 | { | |
3901 | struct hclge_dev *hdev = vport->back; | |
3902 | u8 resp_code; | |
a90bb9a5 | 3903 | u16 retval; |
46a3df9f S |
3904 | int ret; |
3905 | ||
3906 | hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_MAC_VLAN_ADD, true); | |
3907 | if (is_mc) { | |
3908 | desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT); | |
3909 | memcpy(desc[0].data, | |
3910 | req, | |
d44f9b63 | 3911 | sizeof(struct hclge_mac_vlan_tbl_entry_cmd)); |
46a3df9f S |
3912 | hclge_cmd_setup_basic_desc(&desc[1], |
3913 | HCLGE_OPC_MAC_VLAN_ADD, | |
3914 | true); | |
3915 | desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT); | |
3916 | hclge_cmd_setup_basic_desc(&desc[2], | |
3917 | HCLGE_OPC_MAC_VLAN_ADD, | |
3918 | true); | |
3919 | ret = hclge_cmd_send(&hdev->hw, desc, 3); | |
3920 | } else { | |
3921 | memcpy(desc[0].data, | |
3922 | req, | |
d44f9b63 | 3923 | sizeof(struct hclge_mac_vlan_tbl_entry_cmd)); |
46a3df9f S |
3924 | ret = hclge_cmd_send(&hdev->hw, desc, 1); |
3925 | } | |
3926 | if (ret) { | |
3927 | dev_err(&hdev->pdev->dev, | |
3928 | "lookup mac addr failed for cmd_send, ret =%d.\n", | |
3929 | ret); | |
3930 | return ret; | |
3931 | } | |
a90bb9a5 YL |
3932 | resp_code = (le32_to_cpu(desc[0].data[0]) >> 8) & 0xff; |
3933 | retval = le16_to_cpu(desc[0].retval); | |
46a3df9f | 3934 | |
a90bb9a5 | 3935 | return hclge_get_mac_vlan_cmd_status(vport, retval, resp_code, |
46a3df9f S |
3936 | HCLGE_MAC_VLAN_LKUP); |
3937 | } | |
3938 | ||
3939 | static int hclge_add_mac_vlan_tbl(struct hclge_vport *vport, | |
d44f9b63 | 3940 | struct hclge_mac_vlan_tbl_entry_cmd *req, |
46a3df9f S |
3941 | struct hclge_desc *mc_desc) |
3942 | { | |
3943 | struct hclge_dev *hdev = vport->back; | |
3944 | int cfg_status; | |
3945 | u8 resp_code; | |
a90bb9a5 | 3946 | u16 retval; |
46a3df9f S |
3947 | int ret; |
3948 | ||
3949 | if (!mc_desc) { | |
3950 | struct hclge_desc desc; | |
3951 | ||
3952 | hclge_cmd_setup_basic_desc(&desc, | |
3953 | HCLGE_OPC_MAC_VLAN_ADD, | |
3954 | false); | |
d44f9b63 YL |
3955 | memcpy(desc.data, req, |
3956 | sizeof(struct hclge_mac_vlan_tbl_entry_cmd)); | |
46a3df9f | 3957 | ret = hclge_cmd_send(&hdev->hw, &desc, 1); |
a90bb9a5 YL |
3958 | resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff; |
3959 | retval = le16_to_cpu(desc.retval); | |
3960 | ||
3961 | cfg_status = hclge_get_mac_vlan_cmd_status(vport, retval, | |
46a3df9f S |
3962 | resp_code, |
3963 | HCLGE_MAC_VLAN_ADD); | |
3964 | } else { | |
c3b6f755 | 3965 | hclge_cmd_reuse_desc(&mc_desc[0], false); |
46a3df9f | 3966 | mc_desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT); |
c3b6f755 | 3967 | hclge_cmd_reuse_desc(&mc_desc[1], false); |
46a3df9f | 3968 | mc_desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT); |
c3b6f755 | 3969 | hclge_cmd_reuse_desc(&mc_desc[2], false); |
46a3df9f S |
3970 | mc_desc[2].flag &= cpu_to_le16(~HCLGE_CMD_FLAG_NEXT); |
3971 | memcpy(mc_desc[0].data, req, | |
d44f9b63 | 3972 | sizeof(struct hclge_mac_vlan_tbl_entry_cmd)); |
46a3df9f | 3973 | ret = hclge_cmd_send(&hdev->hw, mc_desc, 3); |
a90bb9a5 YL |
3974 | resp_code = (le32_to_cpu(mc_desc[0].data[0]) >> 8) & 0xff; |
3975 | retval = le16_to_cpu(mc_desc[0].retval); | |
3976 | ||
3977 | cfg_status = hclge_get_mac_vlan_cmd_status(vport, retval, | |
46a3df9f S |
3978 | resp_code, |
3979 | HCLGE_MAC_VLAN_ADD); | |
3980 | } | |
3981 | ||
3982 | if (ret) { | |
3983 | dev_err(&hdev->pdev->dev, | |
3984 | "add mac addr failed for cmd_send, ret =%d.\n", | |
3985 | ret); | |
3986 | return ret; | |
3987 | } | |
3988 | ||
3989 | return cfg_status; | |
3990 | } | |
3991 | ||
3992 | static int hclge_add_uc_addr(struct hnae3_handle *handle, | |
3993 | const unsigned char *addr) | |
3994 | { | |
3995 | struct hclge_vport *vport = hclge_get_vport(handle); | |
3996 | ||
3997 | return hclge_add_uc_addr_common(vport, addr); | |
3998 | } | |
3999 | ||
4000 | int hclge_add_uc_addr_common(struct hclge_vport *vport, | |
4001 | const unsigned char *addr) | |
4002 | { | |
4003 | struct hclge_dev *hdev = vport->back; | |
d44f9b63 | 4004 | struct hclge_mac_vlan_tbl_entry_cmd req; |
46a3df9f | 4005 | enum hclge_cmd_status status; |
a90bb9a5 | 4006 | u16 egress_port = 0; |
46a3df9f S |
4007 | |
4008 | /* mac addr check */ | |
4009 | if (is_zero_ether_addr(addr) || | |
4010 | is_broadcast_ether_addr(addr) || | |
4011 | is_multicast_ether_addr(addr)) { | |
4012 | dev_err(&hdev->pdev->dev, | |
4013 | "Set_uc mac err! invalid mac:%pM. is_zero:%d,is_br=%d,is_mul=%d\n", | |
4014 | addr, | |
4015 | is_zero_ether_addr(addr), | |
4016 | is_broadcast_ether_addr(addr), | |
4017 | is_multicast_ether_addr(addr)); | |
4018 | return -EINVAL; | |
4019 | } | |
4020 | ||
4021 | memset(&req, 0, sizeof(req)); | |
4022 | hnae_set_bit(req.flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1); | |
4023 | hnae_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0); | |
4024 | hnae_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT1_EN_B, 0); | |
4025 | hnae_set_bit(req.mc_mac_en, HCLGE_MAC_VLAN_BIT0_EN_B, 0); | |
a90bb9a5 YL |
4026 | |
4027 | hnae_set_bit(egress_port, HCLGE_MAC_EPORT_SW_EN_B, 0); | |
4028 | hnae_set_bit(egress_port, HCLGE_MAC_EPORT_TYPE_B, 0); | |
4029 | hnae_set_field(egress_port, HCLGE_MAC_EPORT_VFID_M, | |
46a3df9f | 4030 | HCLGE_MAC_EPORT_VFID_S, vport->vport_id); |
a90bb9a5 | 4031 | hnae_set_field(egress_port, HCLGE_MAC_EPORT_PFID_M, |
46a3df9f | 4032 | HCLGE_MAC_EPORT_PFID_S, 0); |
a90bb9a5 YL |
4033 | |
4034 | req.egress_port = cpu_to_le16(egress_port); | |
46a3df9f S |
4035 | |
4036 | hclge_prepare_mac_addr(&req, addr); | |
4037 | ||
4038 | status = hclge_add_mac_vlan_tbl(vport, &req, NULL); | |
4039 | ||
4040 | return status; | |
4041 | } | |
4042 | ||
4043 | static int hclge_rm_uc_addr(struct hnae3_handle *handle, | |
4044 | const unsigned char *addr) | |
4045 | { | |
4046 | struct hclge_vport *vport = hclge_get_vport(handle); | |
4047 | ||
4048 | return hclge_rm_uc_addr_common(vport, addr); | |
4049 | } | |
4050 | ||
4051 | int hclge_rm_uc_addr_common(struct hclge_vport *vport, | |
4052 | const unsigned char *addr) | |
4053 | { | |
4054 | struct hclge_dev *hdev = vport->back; | |
d44f9b63 | 4055 | struct hclge_mac_vlan_tbl_entry_cmd req; |
46a3df9f S |
4056 | enum hclge_cmd_status status; |
4057 | ||
4058 | /* mac addr check */ | |
4059 | if (is_zero_ether_addr(addr) || | |
4060 | is_broadcast_ether_addr(addr) || | |
4061 | is_multicast_ether_addr(addr)) { | |
4062 | dev_dbg(&hdev->pdev->dev, | |
4063 | "Remove mac err! invalid mac:%pM.\n", | |
4064 | addr); | |
4065 | return -EINVAL; | |
4066 | } | |
4067 | ||
4068 | memset(&req, 0, sizeof(req)); | |
4069 | hnae_set_bit(req.flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1); | |
4070 | hnae_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0); | |
4071 | hclge_prepare_mac_addr(&req, addr); | |
4072 | status = hclge_remove_mac_vlan_tbl(vport, &req); | |
4073 | ||
4074 | return status; | |
4075 | } | |
4076 | ||
4077 | static int hclge_add_mc_addr(struct hnae3_handle *handle, | |
4078 | const unsigned char *addr) | |
4079 | { | |
4080 | struct hclge_vport *vport = hclge_get_vport(handle); | |
4081 | ||
4082 | return hclge_add_mc_addr_common(vport, addr); | |
4083 | } | |
4084 | ||
4085 | int hclge_add_mc_addr_common(struct hclge_vport *vport, | |
4086 | const unsigned char *addr) | |
4087 | { | |
4088 | struct hclge_dev *hdev = vport->back; | |
d44f9b63 | 4089 | struct hclge_mac_vlan_tbl_entry_cmd req; |
46a3df9f S |
4090 | struct hclge_desc desc[3]; |
4091 | u16 tbl_idx; | |
4092 | int status; | |
4093 | ||
4094 | /* mac addr check */ | |
4095 | if (!is_multicast_ether_addr(addr)) { | |
4096 | dev_err(&hdev->pdev->dev, | |
4097 | "Add mc mac err! invalid mac:%pM.\n", | |
4098 | addr); | |
4099 | return -EINVAL; | |
4100 | } | |
4101 | memset(&req, 0, sizeof(req)); | |
4102 | hnae_set_bit(req.flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1); | |
4103 | hnae_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0); | |
4104 | hnae_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT1_EN_B, 1); | |
4105 | hnae_set_bit(req.mc_mac_en, HCLGE_MAC_VLAN_BIT0_EN_B, 0); | |
4106 | hclge_prepare_mac_addr(&req, addr); | |
4107 | status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true); | |
4108 | if (!status) { | |
4109 | /* This mac addr exist, update VFID for it */ | |
4110 | hclge_update_desc_vfid(desc, vport->vport_id, false); | |
4111 | status = hclge_add_mac_vlan_tbl(vport, &req, desc); | |
4112 | } else { | |
4113 | /* This mac addr do not exist, add new entry for it */ | |
4114 | memset(desc[0].data, 0, sizeof(desc[0].data)); | |
4115 | memset(desc[1].data, 0, sizeof(desc[0].data)); | |
4116 | memset(desc[2].data, 0, sizeof(desc[0].data)); | |
4117 | hclge_update_desc_vfid(desc, vport->vport_id, false); | |
4118 | status = hclge_add_mac_vlan_tbl(vport, &req, desc); | |
4119 | } | |
4120 | ||
4121 | /* Set MTA table for this MAC address */ | |
4122 | tbl_idx = hclge_get_mac_addr_to_mta_index(vport, addr); | |
4123 | status = hclge_set_mta_table_item(vport, tbl_idx, true); | |
4124 | ||
4125 | return status; | |
4126 | } | |
4127 | ||
4128 | static int hclge_rm_mc_addr(struct hnae3_handle *handle, | |
4129 | const unsigned char *addr) | |
4130 | { | |
4131 | struct hclge_vport *vport = hclge_get_vport(handle); | |
4132 | ||
4133 | return hclge_rm_mc_addr_common(vport, addr); | |
4134 | } | |
4135 | ||
4136 | int hclge_rm_mc_addr_common(struct hclge_vport *vport, | |
4137 | const unsigned char *addr) | |
4138 | { | |
4139 | struct hclge_dev *hdev = vport->back; | |
d44f9b63 | 4140 | struct hclge_mac_vlan_tbl_entry_cmd req; |
46a3df9f S |
4141 | enum hclge_cmd_status status; |
4142 | struct hclge_desc desc[3]; | |
4143 | u16 tbl_idx; | |
4144 | ||
4145 | /* mac addr check */ | |
4146 | if (!is_multicast_ether_addr(addr)) { | |
4147 | dev_dbg(&hdev->pdev->dev, | |
4148 | "Remove mc mac err! invalid mac:%pM.\n", | |
4149 | addr); | |
4150 | return -EINVAL; | |
4151 | } | |
4152 | ||
4153 | memset(&req, 0, sizeof(req)); | |
4154 | hnae_set_bit(req.flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1); | |
4155 | hnae_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0); | |
4156 | hnae_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT1_EN_B, 1); | |
4157 | hnae_set_bit(req.mc_mac_en, HCLGE_MAC_VLAN_BIT0_EN_B, 0); | |
4158 | hclge_prepare_mac_addr(&req, addr); | |
4159 | status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true); | |
4160 | if (!status) { | |
4161 | /* This mac addr exist, remove this handle's VFID for it */ | |
4162 | hclge_update_desc_vfid(desc, vport->vport_id, true); | |
4163 | ||
4164 | if (hclge_is_all_function_id_zero(desc)) | |
4165 | /* All the vfid is zero, so need to delete this entry */ | |
4166 | status = hclge_remove_mac_vlan_tbl(vport, &req); | |
4167 | else | |
4168 | /* Not all the vfid is zero, update the vfid */ | |
4169 | status = hclge_add_mac_vlan_tbl(vport, &req, desc); | |
4170 | ||
4171 | } else { | |
4172 | /* This mac addr do not exist, can't delete it */ | |
4173 | dev_err(&hdev->pdev->dev, | |
d7629e74 | 4174 | "Rm multicast mac addr failed, ret = %d.\n", |
46a3df9f S |
4175 | status); |
4176 | return -EIO; | |
4177 | } | |
4178 | ||
4179 | /* Set MTB table for this MAC address */ | |
4180 | tbl_idx = hclge_get_mac_addr_to_mta_index(vport, addr); | |
4181 | status = hclge_set_mta_table_item(vport, tbl_idx, false); | |
4182 | ||
4183 | return status; | |
4184 | } | |
4185 | ||
4186 | static void hclge_get_mac_addr(struct hnae3_handle *handle, u8 *p) | |
4187 | { | |
4188 | struct hclge_vport *vport = hclge_get_vport(handle); | |
4189 | struct hclge_dev *hdev = vport->back; | |
4190 | ||
4191 | ether_addr_copy(p, hdev->hw.mac.mac_addr); | |
4192 | } | |
4193 | ||
4194 | static int hclge_set_mac_addr(struct hnae3_handle *handle, void *p) | |
4195 | { | |
4196 | const unsigned char *new_addr = (const unsigned char *)p; | |
4197 | struct hclge_vport *vport = hclge_get_vport(handle); | |
4198 | struct hclge_dev *hdev = vport->back; | |
4199 | ||
4200 | /* mac addr check */ | |
4201 | if (is_zero_ether_addr(new_addr) || | |
4202 | is_broadcast_ether_addr(new_addr) || | |
4203 | is_multicast_ether_addr(new_addr)) { | |
4204 | dev_err(&hdev->pdev->dev, | |
4205 | "Change uc mac err! invalid mac:%p.\n", | |
4206 | new_addr); | |
4207 | return -EINVAL; | |
4208 | } | |
4209 | ||
4210 | hclge_rm_uc_addr(handle, hdev->hw.mac.mac_addr); | |
4211 | ||
4212 | if (!hclge_add_uc_addr(handle, new_addr)) { | |
4213 | ether_addr_copy(hdev->hw.mac.mac_addr, new_addr); | |
4214 | return 0; | |
4215 | } | |
4216 | ||
4217 | return -EIO; | |
4218 | } | |
4219 | ||
4220 | static int hclge_set_vlan_filter_ctrl(struct hclge_dev *hdev, u8 vlan_type, | |
4221 | bool filter_en) | |
4222 | { | |
d44f9b63 | 4223 | struct hclge_vlan_filter_ctrl_cmd *req; |
46a3df9f S |
4224 | struct hclge_desc desc; |
4225 | int ret; | |
4226 | ||
4227 | hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_FILTER_CTRL, false); | |
4228 | ||
d44f9b63 | 4229 | req = (struct hclge_vlan_filter_ctrl_cmd *)desc.data; |
46a3df9f S |
4230 | req->vlan_type = vlan_type; |
4231 | req->vlan_fe = filter_en; | |
4232 | ||
4233 | ret = hclge_cmd_send(&hdev->hw, &desc, 1); | |
4234 | if (ret) { | |
4235 | dev_err(&hdev->pdev->dev, "set vlan filter fail, ret =%d.\n", | |
4236 | ret); | |
4237 | return ret; | |
4238 | } | |
4239 | ||
4240 | return 0; | |
4241 | } | |
4242 | ||
4243 | int hclge_set_vf_vlan_common(struct hclge_dev *hdev, int vfid, | |
4244 | bool is_kill, u16 vlan, u8 qos, __be16 proto) | |
4245 | { | |
4246 | #define HCLGE_MAX_VF_BYTES 16 | |
d44f9b63 YL |
4247 | struct hclge_vlan_filter_vf_cfg_cmd *req0; |
4248 | struct hclge_vlan_filter_vf_cfg_cmd *req1; | |
46a3df9f S |
4249 | struct hclge_desc desc[2]; |
4250 | u8 vf_byte_val; | |
4251 | u8 vf_byte_off; | |
4252 | int ret; | |
4253 | ||
4254 | hclge_cmd_setup_basic_desc(&desc[0], | |
4255 | HCLGE_OPC_VLAN_FILTER_VF_CFG, false); | |
4256 | hclge_cmd_setup_basic_desc(&desc[1], | |
4257 | HCLGE_OPC_VLAN_FILTER_VF_CFG, false); | |
4258 | ||
4259 | desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT); | |
4260 | ||
4261 | vf_byte_off = vfid / 8; | |
4262 | vf_byte_val = 1 << (vfid % 8); | |
4263 | ||
d44f9b63 YL |
4264 | req0 = (struct hclge_vlan_filter_vf_cfg_cmd *)desc[0].data; |
4265 | req1 = (struct hclge_vlan_filter_vf_cfg_cmd *)desc[1].data; | |
46a3df9f | 4266 | |
a90bb9a5 | 4267 | req0->vlan_id = cpu_to_le16(vlan); |
46a3df9f S |
4268 | req0->vlan_cfg = is_kill; |
4269 | ||
4270 | if (vf_byte_off < HCLGE_MAX_VF_BYTES) | |
4271 | req0->vf_bitmap[vf_byte_off] = vf_byte_val; | |
4272 | else | |
4273 | req1->vf_bitmap[vf_byte_off - HCLGE_MAX_VF_BYTES] = vf_byte_val; | |
4274 | ||
4275 | ret = hclge_cmd_send(&hdev->hw, desc, 2); | |
4276 | if (ret) { | |
4277 | dev_err(&hdev->pdev->dev, | |
4278 | "Send vf vlan command fail, ret =%d.\n", | |
4279 | ret); | |
4280 | return ret; | |
4281 | } | |
4282 | ||
4283 | if (!is_kill) { | |
4284 | if (!req0->resp_code || req0->resp_code == 1) | |
4285 | return 0; | |
4286 | ||
4287 | dev_err(&hdev->pdev->dev, | |
4288 | "Add vf vlan filter fail, ret =%d.\n", | |
4289 | req0->resp_code); | |
4290 | } else { | |
4291 | if (!req0->resp_code) | |
4292 | return 0; | |
4293 | ||
4294 | dev_err(&hdev->pdev->dev, | |
4295 | "Kill vf vlan filter fail, ret =%d.\n", | |
4296 | req0->resp_code); | |
4297 | } | |
4298 | ||
4299 | return -EIO; | |
4300 | } | |
4301 | ||
4302 | static int hclge_set_port_vlan_filter(struct hnae3_handle *handle, | |
4303 | __be16 proto, u16 vlan_id, | |
4304 | bool is_kill) | |
4305 | { | |
4306 | struct hclge_vport *vport = hclge_get_vport(handle); | |
4307 | struct hclge_dev *hdev = vport->back; | |
d44f9b63 | 4308 | struct hclge_vlan_filter_pf_cfg_cmd *req; |
46a3df9f S |
4309 | struct hclge_desc desc; |
4310 | u8 vlan_offset_byte_val; | |
4311 | u8 vlan_offset_byte; | |
4312 | u8 vlan_offset_160; | |
4313 | int ret; | |
4314 | ||
4315 | hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_FILTER_PF_CFG, false); | |
4316 | ||
4317 | vlan_offset_160 = vlan_id / 160; | |
4318 | vlan_offset_byte = (vlan_id % 160) / 8; | |
4319 | vlan_offset_byte_val = 1 << (vlan_id % 8); | |
4320 | ||
d44f9b63 | 4321 | req = (struct hclge_vlan_filter_pf_cfg_cmd *)desc.data; |
46a3df9f S |
4322 | req->vlan_offset = vlan_offset_160; |
4323 | req->vlan_cfg = is_kill; | |
4324 | req->vlan_offset_bitmap[vlan_offset_byte] = vlan_offset_byte_val; | |
4325 | ||
4326 | ret = hclge_cmd_send(&hdev->hw, &desc, 1); | |
4327 | if (ret) { | |
4328 | dev_err(&hdev->pdev->dev, | |
4329 | "port vlan command, send fail, ret =%d.\n", | |
4330 | ret); | |
4331 | return ret; | |
4332 | } | |
4333 | ||
4334 | ret = hclge_set_vf_vlan_common(hdev, 0, is_kill, vlan_id, 0, proto); | |
4335 | if (ret) { | |
4336 | dev_err(&hdev->pdev->dev, | |
4337 | "Set pf vlan filter config fail, ret =%d.\n", | |
4338 | ret); | |
4339 | return -EIO; | |
4340 | } | |
4341 | ||
4342 | return 0; | |
4343 | } | |
4344 | ||
4345 | static int hclge_set_vf_vlan_filter(struct hnae3_handle *handle, int vfid, | |
4346 | u16 vlan, u8 qos, __be16 proto) | |
4347 | { | |
4348 | struct hclge_vport *vport = hclge_get_vport(handle); | |
4349 | struct hclge_dev *hdev = vport->back; | |
4350 | ||
4351 | if ((vfid >= hdev->num_alloc_vfs) || (vlan > 4095) || (qos > 7)) | |
4352 | return -EINVAL; | |
4353 | if (proto != htons(ETH_P_8021Q)) | |
4354 | return -EPROTONOSUPPORT; | |
4355 | ||
4356 | return hclge_set_vf_vlan_common(hdev, vfid, false, vlan, qos, proto); | |
4357 | } | |
4358 | ||
e62f2a6b PL |
4359 | static int hclge_set_vlan_tx_offload_cfg(struct hclge_vport *vport) |
4360 | { | |
4361 | struct hclge_tx_vtag_cfg *vcfg = &vport->txvlan_cfg; | |
4362 | struct hclge_vport_vtag_tx_cfg_cmd *req; | |
4363 | struct hclge_dev *hdev = vport->back; | |
4364 | struct hclge_desc desc; | |
4365 | int status; | |
4366 | ||
4367 | hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_PORT_TX_CFG, false); | |
4368 | ||
4369 | req = (struct hclge_vport_vtag_tx_cfg_cmd *)desc.data; | |
4370 | req->def_vlan_tag1 = cpu_to_le16(vcfg->default_tag1); | |
4371 | req->def_vlan_tag2 = cpu_to_le16(vcfg->default_tag2); | |
4372 | hnae_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_TAG_B, | |
4373 | vcfg->accept_tag ? 1 : 0); | |
4374 | hnae_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_UNTAG_B, | |
4375 | vcfg->accept_untag ? 1 : 0); | |
4376 | hnae_set_bit(req->vport_vlan_cfg, HCLGE_PORT_INS_TAG1_EN_B, | |
4377 | vcfg->insert_tag1_en ? 1 : 0); | |
4378 | hnae_set_bit(req->vport_vlan_cfg, HCLGE_PORT_INS_TAG2_EN_B, | |
4379 | vcfg->insert_tag2_en ? 1 : 0); | |
4380 | hnae_set_bit(req->vport_vlan_cfg, HCLGE_CFG_NIC_ROCE_SEL_B, 0); | |
4381 | ||
4382 | req->vf_offset = vport->vport_id / HCLGE_VF_NUM_PER_CMD; | |
4383 | req->vf_bitmap[req->vf_offset] = | |
4384 | 1 << (vport->vport_id % HCLGE_VF_NUM_PER_BYTE); | |
4385 | ||
4386 | status = hclge_cmd_send(&hdev->hw, &desc, 1); | |
4387 | if (status) | |
4388 | dev_err(&hdev->pdev->dev, | |
4389 | "Send port txvlan cfg command fail, ret =%d\n", | |
4390 | status); | |
4391 | ||
4392 | return status; | |
4393 | } | |
4394 | ||
4395 | static int hclge_set_vlan_rx_offload_cfg(struct hclge_vport *vport) | |
4396 | { | |
4397 | struct hclge_rx_vtag_cfg *vcfg = &vport->rxvlan_cfg; | |
4398 | struct hclge_vport_vtag_rx_cfg_cmd *req; | |
4399 | struct hclge_dev *hdev = vport->back; | |
4400 | struct hclge_desc desc; | |
4401 | int status; | |
4402 | ||
4403 | hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_PORT_RX_CFG, false); | |
4404 | ||
4405 | req = (struct hclge_vport_vtag_rx_cfg_cmd *)desc.data; | |
4406 | hnae_set_bit(req->vport_vlan_cfg, HCLGE_REM_TAG1_EN_B, | |
4407 | vcfg->strip_tag1_en ? 1 : 0); | |
4408 | hnae_set_bit(req->vport_vlan_cfg, HCLGE_REM_TAG2_EN_B, | |
4409 | vcfg->strip_tag2_en ? 1 : 0); | |
4410 | hnae_set_bit(req->vport_vlan_cfg, HCLGE_SHOW_TAG1_EN_B, | |
4411 | vcfg->vlan1_vlan_prionly ? 1 : 0); | |
4412 | hnae_set_bit(req->vport_vlan_cfg, HCLGE_SHOW_TAG2_EN_B, | |
4413 | vcfg->vlan2_vlan_prionly ? 1 : 0); | |
4414 | ||
4415 | req->vf_offset = vport->vport_id / HCLGE_VF_NUM_PER_CMD; | |
4416 | req->vf_bitmap[req->vf_offset] = | |
4417 | 1 << (vport->vport_id % HCLGE_VF_NUM_PER_BYTE); | |
4418 | ||
4419 | status = hclge_cmd_send(&hdev->hw, &desc, 1); | |
4420 | if (status) | |
4421 | dev_err(&hdev->pdev->dev, | |
4422 | "Send port rxvlan cfg command fail, ret =%d\n", | |
4423 | status); | |
4424 | ||
4425 | return status; | |
4426 | } | |
4427 | ||
4428 | static int hclge_set_vlan_protocol_type(struct hclge_dev *hdev) | |
4429 | { | |
4430 | struct hclge_rx_vlan_type_cfg_cmd *rx_req; | |
4431 | struct hclge_tx_vlan_type_cfg_cmd *tx_req; | |
4432 | struct hclge_desc desc; | |
4433 | int status; | |
4434 | ||
4435 | hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_TYPE_ID, false); | |
4436 | rx_req = (struct hclge_rx_vlan_type_cfg_cmd *)desc.data; | |
4437 | rx_req->ot_fst_vlan_type = | |
4438 | cpu_to_le16(hdev->vlan_type_cfg.rx_ot_fst_vlan_type); | |
4439 | rx_req->ot_sec_vlan_type = | |
4440 | cpu_to_le16(hdev->vlan_type_cfg.rx_ot_sec_vlan_type); | |
4441 | rx_req->in_fst_vlan_type = | |
4442 | cpu_to_le16(hdev->vlan_type_cfg.rx_in_fst_vlan_type); | |
4443 | rx_req->in_sec_vlan_type = | |
4444 | cpu_to_le16(hdev->vlan_type_cfg.rx_in_sec_vlan_type); | |
4445 | ||
4446 | status = hclge_cmd_send(&hdev->hw, &desc, 1); | |
4447 | if (status) { | |
4448 | dev_err(&hdev->pdev->dev, | |
4449 | "Send rxvlan protocol type command fail, ret =%d\n", | |
4450 | status); | |
4451 | return status; | |
4452 | } | |
4453 | ||
4454 | hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_INSERT, false); | |
4455 | ||
4456 | tx_req = (struct hclge_tx_vlan_type_cfg_cmd *)&desc.data; | |
4457 | tx_req->ot_vlan_type = cpu_to_le16(hdev->vlan_type_cfg.tx_ot_vlan_type); | |
4458 | tx_req->in_vlan_type = cpu_to_le16(hdev->vlan_type_cfg.tx_in_vlan_type); | |
4459 | ||
4460 | status = hclge_cmd_send(&hdev->hw, &desc, 1); | |
4461 | if (status) | |
4462 | dev_err(&hdev->pdev->dev, | |
4463 | "Send txvlan protocol type command fail, ret =%d\n", | |
4464 | status); | |
4465 | ||
4466 | return status; | |
4467 | } | |
4468 | ||
46a3df9f S |
4469 | static int hclge_init_vlan_config(struct hclge_dev *hdev) |
4470 | { | |
e62f2a6b PL |
4471 | #define HCLGE_FILTER_TYPE_VF 0 |
4472 | #define HCLGE_FILTER_TYPE_PORT 1 | |
4473 | #define HCLGE_DEF_VLAN_TYPE 0x8100 | |
4474 | ||
5e43aef8 | 4475 | struct hnae3_handle *handle; |
e62f2a6b | 4476 | struct hclge_vport *vport; |
46a3df9f | 4477 | int ret; |
e62f2a6b PL |
4478 | int i; |
4479 | ||
4480 | ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF, true); | |
4481 | if (ret) | |
4482 | return ret; | |
46a3df9f | 4483 | |
e62f2a6b | 4484 | ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_PORT, true); |
46a3df9f S |
4485 | if (ret) |
4486 | return ret; | |
4487 | ||
e62f2a6b PL |
4488 | hdev->vlan_type_cfg.rx_in_fst_vlan_type = HCLGE_DEF_VLAN_TYPE; |
4489 | hdev->vlan_type_cfg.rx_in_sec_vlan_type = HCLGE_DEF_VLAN_TYPE; | |
4490 | hdev->vlan_type_cfg.rx_ot_fst_vlan_type = HCLGE_DEF_VLAN_TYPE; | |
4491 | hdev->vlan_type_cfg.rx_ot_sec_vlan_type = HCLGE_DEF_VLAN_TYPE; | |
4492 | hdev->vlan_type_cfg.tx_ot_vlan_type = HCLGE_DEF_VLAN_TYPE; | |
4493 | hdev->vlan_type_cfg.tx_in_vlan_type = HCLGE_DEF_VLAN_TYPE; | |
4494 | ||
4495 | ret = hclge_set_vlan_protocol_type(hdev); | |
5e43aef8 L |
4496 | if (ret) |
4497 | return ret; | |
46a3df9f | 4498 | |
e62f2a6b PL |
4499 | for (i = 0; i < hdev->num_alloc_vport; i++) { |
4500 | vport = &hdev->vport[i]; | |
4501 | vport->txvlan_cfg.accept_tag = true; | |
4502 | vport->txvlan_cfg.accept_untag = true; | |
4503 | vport->txvlan_cfg.insert_tag1_en = false; | |
4504 | vport->txvlan_cfg.insert_tag2_en = false; | |
4505 | vport->txvlan_cfg.default_tag1 = 0; | |
4506 | vport->txvlan_cfg.default_tag2 = 0; | |
4507 | ||
4508 | ret = hclge_set_vlan_tx_offload_cfg(vport); | |
4509 | if (ret) | |
4510 | return ret; | |
4511 | ||
4512 | vport->rxvlan_cfg.strip_tag1_en = false; | |
4513 | vport->rxvlan_cfg.strip_tag2_en = true; | |
4514 | vport->rxvlan_cfg.vlan1_vlan_prionly = false; | |
4515 | vport->rxvlan_cfg.vlan2_vlan_prionly = false; | |
4516 | ||
4517 | ret = hclge_set_vlan_rx_offload_cfg(vport); | |
4518 | if (ret) | |
4519 | return ret; | |
4520 | } | |
4521 | ||
5e43aef8 L |
4522 | handle = &hdev->vport[0].nic; |
4523 | return hclge_set_port_vlan_filter(handle, htons(ETH_P_8021Q), 0, false); | |
46a3df9f S |
4524 | } |
4525 | ||
5f9a7732 PL |
4526 | static int hclge_en_hw_strip_rxvtag(struct hnae3_handle *handle, bool enable) |
4527 | { | |
4528 | struct hclge_vport *vport = hclge_get_vport(handle); | |
4529 | ||
4530 | vport->rxvlan_cfg.strip_tag1_en = false; | |
4531 | vport->rxvlan_cfg.strip_tag2_en = enable; | |
4532 | vport->rxvlan_cfg.vlan1_vlan_prionly = false; | |
4533 | vport->rxvlan_cfg.vlan2_vlan_prionly = false; | |
4534 | ||
4535 | return hclge_set_vlan_rx_offload_cfg(vport); | |
4536 | } | |
4537 | ||
46a3df9f S |
4538 | static int hclge_set_mtu(struct hnae3_handle *handle, int new_mtu) |
4539 | { | |
4540 | struct hclge_vport *vport = hclge_get_vport(handle); | |
d44f9b63 | 4541 | struct hclge_config_max_frm_size_cmd *req; |
46a3df9f S |
4542 | struct hclge_dev *hdev = vport->back; |
4543 | struct hclge_desc desc; | |
4544 | int ret; | |
4545 | ||
4546 | if ((new_mtu < HCLGE_MAC_MIN_MTU) || (new_mtu > HCLGE_MAC_MAX_MTU)) | |
4547 | return -EINVAL; | |
4548 | ||
4549 | hdev->mps = new_mtu; | |
4550 | hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAX_FRM_SIZE, false); | |
4551 | ||
d44f9b63 | 4552 | req = (struct hclge_config_max_frm_size_cmd *)desc.data; |
46a3df9f S |
4553 | req->max_frm_size = cpu_to_le16(new_mtu); |
4554 | ||
4555 | ret = hclge_cmd_send(&hdev->hw, &desc, 1); | |
4556 | if (ret) { | |
4557 | dev_err(&hdev->pdev->dev, "set mtu fail, ret =%d.\n", ret); | |
4558 | return ret; | |
4559 | } | |
4560 | ||
4561 | return 0; | |
4562 | } | |
4563 | ||
4564 | static int hclge_send_reset_tqp_cmd(struct hclge_dev *hdev, u16 queue_id, | |
4565 | bool enable) | |
4566 | { | |
d44f9b63 | 4567 | struct hclge_reset_tqp_queue_cmd *req; |
46a3df9f S |
4568 | struct hclge_desc desc; |
4569 | int ret; | |
4570 | ||
4571 | hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RESET_TQP_QUEUE, false); | |
4572 | ||
d44f9b63 | 4573 | req = (struct hclge_reset_tqp_queue_cmd *)desc.data; |
46a3df9f S |
4574 | req->tqp_id = cpu_to_le16(queue_id & HCLGE_RING_ID_MASK); |
4575 | hnae_set_bit(req->reset_req, HCLGE_TQP_RESET_B, enable); | |
4576 | ||
4577 | ret = hclge_cmd_send(&hdev->hw, &desc, 1); | |
4578 | if (ret) { | |
4579 | dev_err(&hdev->pdev->dev, | |
4580 | "Send tqp reset cmd error, status =%d\n", ret); | |
4581 | return ret; | |
4582 | } | |
4583 | ||
4584 | return 0; | |
4585 | } | |
4586 | ||
4587 | static int hclge_get_reset_status(struct hclge_dev *hdev, u16 queue_id) | |
4588 | { | |
d44f9b63 | 4589 | struct hclge_reset_tqp_queue_cmd *req; |
46a3df9f S |
4590 | struct hclge_desc desc; |
4591 | int ret; | |
4592 | ||
4593 | hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RESET_TQP_QUEUE, true); | |
4594 | ||
d44f9b63 | 4595 | req = (struct hclge_reset_tqp_queue_cmd *)desc.data; |
46a3df9f S |
4596 | req->tqp_id = cpu_to_le16(queue_id & HCLGE_RING_ID_MASK); |
4597 | ||
4598 | ret = hclge_cmd_send(&hdev->hw, &desc, 1); | |
4599 | if (ret) { | |
4600 | dev_err(&hdev->pdev->dev, | |
4601 | "Get reset status error, status =%d\n", ret); | |
4602 | return ret; | |
4603 | } | |
4604 | ||
4605 | return hnae_get_bit(req->ready_to_reset, HCLGE_TQP_RESET_B); | |
4606 | } | |
4607 | ||
63d7e66f | 4608 | void hclge_reset_tqp(struct hnae3_handle *handle, u16 queue_id) |
46a3df9f S |
4609 | { |
4610 | struct hclge_vport *vport = hclge_get_vport(handle); | |
4611 | struct hclge_dev *hdev = vport->back; | |
4612 | int reset_try_times = 0; | |
4613 | int reset_status; | |
4614 | int ret; | |
4615 | ||
4616 | ret = hclge_tqp_enable(hdev, queue_id, 0, false); | |
4617 | if (ret) { | |
4618 | dev_warn(&hdev->pdev->dev, "Disable tqp fail, ret = %d\n", ret); | |
4619 | return; | |
4620 | } | |
4621 | ||
4622 | ret = hclge_send_reset_tqp_cmd(hdev, queue_id, true); | |
4623 | if (ret) { | |
4624 | dev_warn(&hdev->pdev->dev, | |
4625 | "Send reset tqp cmd fail, ret = %d\n", ret); | |
4626 | return; | |
4627 | } | |
4628 | ||
4629 | reset_try_times = 0; | |
4630 | while (reset_try_times++ < HCLGE_TQP_RESET_TRY_TIMES) { | |
4631 | /* Wait for tqp hw reset */ | |
4632 | msleep(20); | |
4633 | reset_status = hclge_get_reset_status(hdev, queue_id); | |
4634 | if (reset_status) | |
4635 | break; | |
4636 | } | |
4637 | ||
4638 | if (reset_try_times >= HCLGE_TQP_RESET_TRY_TIMES) { | |
4639 | dev_warn(&hdev->pdev->dev, "Reset TQP fail\n"); | |
4640 | return; | |
4641 | } | |
4642 | ||
4643 | ret = hclge_send_reset_tqp_cmd(hdev, queue_id, false); | |
4644 | if (ret) { | |
4645 | dev_warn(&hdev->pdev->dev, | |
4646 | "Deassert the soft reset fail, ret = %d\n", ret); | |
4647 | return; | |
4648 | } | |
4649 | } | |
4650 | ||
4651 | static u32 hclge_get_fw_version(struct hnae3_handle *handle) | |
4652 | { | |
4653 | struct hclge_vport *vport = hclge_get_vport(handle); | |
4654 | struct hclge_dev *hdev = vport->back; | |
4655 | ||
4656 | return hdev->fw_version; | |
4657 | } | |
4658 | ||
4659 | static void hclge_get_pauseparam(struct hnae3_handle *handle, u32 *auto_neg, | |
4660 | u32 *rx_en, u32 *tx_en) | |
4661 | { | |
4662 | struct hclge_vport *vport = hclge_get_vport(handle); | |
4663 | struct hclge_dev *hdev = vport->back; | |
4664 | ||
4665 | *auto_neg = hclge_get_autoneg(handle); | |
4666 | ||
4667 | if (hdev->tm_info.fc_mode == HCLGE_FC_PFC) { | |
4668 | *rx_en = 0; | |
4669 | *tx_en = 0; | |
4670 | return; | |
4671 | } | |
4672 | ||
4673 | if (hdev->tm_info.fc_mode == HCLGE_FC_RX_PAUSE) { | |
4674 | *rx_en = 1; | |
4675 | *tx_en = 0; | |
4676 | } else if (hdev->tm_info.fc_mode == HCLGE_FC_TX_PAUSE) { | |
4677 | *tx_en = 1; | |
4678 | *rx_en = 0; | |
4679 | } else if (hdev->tm_info.fc_mode == HCLGE_FC_FULL) { | |
4680 | *rx_en = 1; | |
4681 | *tx_en = 1; | |
4682 | } else { | |
4683 | *rx_en = 0; | |
4684 | *tx_en = 0; | |
4685 | } | |
4686 | } | |
4687 | ||
4688 | static void hclge_get_ksettings_an_result(struct hnae3_handle *handle, | |
4689 | u8 *auto_neg, u32 *speed, u8 *duplex) | |
4690 | { | |
4691 | struct hclge_vport *vport = hclge_get_vport(handle); | |
4692 | struct hclge_dev *hdev = vport->back; | |
4693 | ||
4694 | if (speed) | |
4695 | *speed = hdev->hw.mac.speed; | |
4696 | if (duplex) | |
4697 | *duplex = hdev->hw.mac.duplex; | |
4698 | if (auto_neg) | |
4699 | *auto_neg = hdev->hw.mac.autoneg; | |
4700 | } | |
4701 | ||
4702 | static void hclge_get_media_type(struct hnae3_handle *handle, u8 *media_type) | |
4703 | { | |
4704 | struct hclge_vport *vport = hclge_get_vport(handle); | |
4705 | struct hclge_dev *hdev = vport->back; | |
4706 | ||
4707 | if (media_type) | |
4708 | *media_type = hdev->hw.mac.media_type; | |
4709 | } | |
4710 | ||
4711 | static void hclge_get_mdix_mode(struct hnae3_handle *handle, | |
4712 | u8 *tp_mdix_ctrl, u8 *tp_mdix) | |
4713 | { | |
4714 | struct hclge_vport *vport = hclge_get_vport(handle); | |
4715 | struct hclge_dev *hdev = vport->back; | |
4716 | struct phy_device *phydev = hdev->hw.mac.phydev; | |
4717 | int mdix_ctrl, mdix, retval, is_resolved; | |
4718 | ||
4719 | if (!phydev) { | |
4720 | *tp_mdix_ctrl = ETH_TP_MDI_INVALID; | |
4721 | *tp_mdix = ETH_TP_MDI_INVALID; | |
4722 | return; | |
4723 | } | |
4724 | ||
4725 | phy_write(phydev, HCLGE_PHY_PAGE_REG, HCLGE_PHY_PAGE_MDIX); | |
4726 | ||
4727 | retval = phy_read(phydev, HCLGE_PHY_CSC_REG); | |
4728 | mdix_ctrl = hnae_get_field(retval, HCLGE_PHY_MDIX_CTRL_M, | |
4729 | HCLGE_PHY_MDIX_CTRL_S); | |
4730 | ||
4731 | retval = phy_read(phydev, HCLGE_PHY_CSS_REG); | |
4732 | mdix = hnae_get_bit(retval, HCLGE_PHY_MDIX_STATUS_B); | |
4733 | is_resolved = hnae_get_bit(retval, HCLGE_PHY_SPEED_DUP_RESOLVE_B); | |
4734 | ||
4735 | phy_write(phydev, HCLGE_PHY_PAGE_REG, HCLGE_PHY_PAGE_COPPER); | |
4736 | ||
4737 | switch (mdix_ctrl) { | |
4738 | case 0x0: | |
4739 | *tp_mdix_ctrl = ETH_TP_MDI; | |
4740 | break; | |
4741 | case 0x1: | |
4742 | *tp_mdix_ctrl = ETH_TP_MDI_X; | |
4743 | break; | |
4744 | case 0x3: | |
4745 | *tp_mdix_ctrl = ETH_TP_MDI_AUTO; | |
4746 | break; | |
4747 | default: | |
4748 | *tp_mdix_ctrl = ETH_TP_MDI_INVALID; | |
4749 | break; | |
4750 | } | |
4751 | ||
4752 | if (!is_resolved) | |
4753 | *tp_mdix = ETH_TP_MDI_INVALID; | |
4754 | else if (mdix) | |
4755 | *tp_mdix = ETH_TP_MDI_X; | |
4756 | else | |
4757 | *tp_mdix = ETH_TP_MDI; | |
4758 | } | |
4759 | ||
4760 | static int hclge_init_client_instance(struct hnae3_client *client, | |
4761 | struct hnae3_ae_dev *ae_dev) | |
4762 | { | |
4763 | struct hclge_dev *hdev = ae_dev->priv; | |
4764 | struct hclge_vport *vport; | |
4765 | int i, ret; | |
4766 | ||
4767 | for (i = 0; i < hdev->num_vmdq_vport + 1; i++) { | |
4768 | vport = &hdev->vport[i]; | |
4769 | ||
4770 | switch (client->type) { | |
4771 | case HNAE3_CLIENT_KNIC: | |
4772 | ||
4773 | hdev->nic_client = client; | |
4774 | vport->nic.client = client; | |
4775 | ret = client->ops->init_instance(&vport->nic); | |
4776 | if (ret) | |
4777 | goto err; | |
4778 | ||
4779 | if (hdev->roce_client && | |
e92a0843 | 4780 | hnae3_dev_roce_supported(hdev)) { |
46a3df9f S |
4781 | struct hnae3_client *rc = hdev->roce_client; |
4782 | ||
4783 | ret = hclge_init_roce_base_info(vport); | |
4784 | if (ret) | |
4785 | goto err; | |
4786 | ||
4787 | ret = rc->ops->init_instance(&vport->roce); | |
4788 | if (ret) | |
4789 | goto err; | |
4790 | } | |
4791 | ||
4792 | break; | |
4793 | case HNAE3_CLIENT_UNIC: | |
4794 | hdev->nic_client = client; | |
4795 | vport->nic.client = client; | |
4796 | ||
4797 | ret = client->ops->init_instance(&vport->nic); | |
4798 | if (ret) | |
4799 | goto err; | |
4800 | ||
4801 | break; | |
4802 | case HNAE3_CLIENT_ROCE: | |
e92a0843 | 4803 | if (hnae3_dev_roce_supported(hdev)) { |
46a3df9f S |
4804 | hdev->roce_client = client; |
4805 | vport->roce.client = client; | |
4806 | } | |
4807 | ||
3a46f34d | 4808 | if (hdev->roce_client && hdev->nic_client) { |
46a3df9f S |
4809 | ret = hclge_init_roce_base_info(vport); |
4810 | if (ret) | |
4811 | goto err; | |
4812 | ||
4813 | ret = client->ops->init_instance(&vport->roce); | |
4814 | if (ret) | |
4815 | goto err; | |
4816 | } | |
4817 | } | |
4818 | } | |
4819 | ||
4820 | return 0; | |
4821 | err: | |
4822 | return ret; | |
4823 | } | |
4824 | ||
4825 | static void hclge_uninit_client_instance(struct hnae3_client *client, | |
4826 | struct hnae3_ae_dev *ae_dev) | |
4827 | { | |
4828 | struct hclge_dev *hdev = ae_dev->priv; | |
4829 | struct hclge_vport *vport; | |
4830 | int i; | |
4831 | ||
4832 | for (i = 0; i < hdev->num_vmdq_vport + 1; i++) { | |
4833 | vport = &hdev->vport[i]; | |
a17dcf3f | 4834 | if (hdev->roce_client) { |
46a3df9f S |
4835 | hdev->roce_client->ops->uninit_instance(&vport->roce, |
4836 | 0); | |
a17dcf3f L |
4837 | hdev->roce_client = NULL; |
4838 | vport->roce.client = NULL; | |
4839 | } | |
46a3df9f S |
4840 | if (client->type == HNAE3_CLIENT_ROCE) |
4841 | return; | |
a17dcf3f | 4842 | if (client->ops->uninit_instance) { |
46a3df9f | 4843 | client->ops->uninit_instance(&vport->nic, 0); |
a17dcf3f L |
4844 | hdev->nic_client = NULL; |
4845 | vport->nic.client = NULL; | |
4846 | } | |
46a3df9f S |
4847 | } |
4848 | } | |
4849 | ||
4850 | static int hclge_pci_init(struct hclge_dev *hdev) | |
4851 | { | |
4852 | struct pci_dev *pdev = hdev->pdev; | |
4853 | struct hclge_hw *hw; | |
4854 | int ret; | |
4855 | ||
4856 | ret = pci_enable_device(pdev); | |
4857 | if (ret) { | |
4858 | dev_err(&pdev->dev, "failed to enable PCI device\n"); | |
4859 | goto err_no_drvdata; | |
4860 | } | |
4861 | ||
4862 | ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); | |
4863 | if (ret) { | |
4864 | ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)); | |
4865 | if (ret) { | |
4866 | dev_err(&pdev->dev, | |
4867 | "can't set consistent PCI DMA"); | |
4868 | goto err_disable_device; | |
4869 | } | |
4870 | dev_warn(&pdev->dev, "set DMA mask to 32 bits\n"); | |
4871 | } | |
4872 | ||
4873 | ret = pci_request_regions(pdev, HCLGE_DRIVER_NAME); | |
4874 | if (ret) { | |
4875 | dev_err(&pdev->dev, "PCI request regions failed %d\n", ret); | |
4876 | goto err_disable_device; | |
4877 | } | |
4878 | ||
4879 | pci_set_master(pdev); | |
4880 | hw = &hdev->hw; | |
4881 | hw->back = hdev; | |
4882 | hw->io_base = pcim_iomap(pdev, 2, 0); | |
4883 | if (!hw->io_base) { | |
4884 | dev_err(&pdev->dev, "Can't map configuration register space\n"); | |
4885 | ret = -ENOMEM; | |
4886 | goto err_clr_master; | |
4887 | } | |
4888 | ||
709eb41a L |
4889 | hdev->num_req_vfs = pci_sriov_get_totalvfs(pdev); |
4890 | ||
46a3df9f S |
4891 | return 0; |
4892 | err_clr_master: | |
4893 | pci_clear_master(pdev); | |
4894 | pci_release_regions(pdev); | |
4895 | err_disable_device: | |
4896 | pci_disable_device(pdev); | |
4897 | err_no_drvdata: | |
4898 | pci_set_drvdata(pdev, NULL); | |
4899 | ||
4900 | return ret; | |
4901 | } | |
4902 | ||
4903 | static void hclge_pci_uninit(struct hclge_dev *hdev) | |
4904 | { | |
4905 | struct pci_dev *pdev = hdev->pdev; | |
4906 | ||
887c3820 | 4907 | pci_free_irq_vectors(pdev); |
46a3df9f S |
4908 | pci_clear_master(pdev); |
4909 | pci_release_mem_regions(pdev); | |
4910 | pci_disable_device(pdev); | |
4911 | } | |
4912 | ||
4913 | static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev) | |
4914 | { | |
4915 | struct pci_dev *pdev = ae_dev->pdev; | |
46a3df9f S |
4916 | struct hclge_dev *hdev; |
4917 | int ret; | |
4918 | ||
4919 | hdev = devm_kzalloc(&pdev->dev, sizeof(*hdev), GFP_KERNEL); | |
4920 | if (!hdev) { | |
4921 | ret = -ENOMEM; | |
4922 | goto err_hclge_dev; | |
4923 | } | |
4924 | ||
46a3df9f S |
4925 | hdev->pdev = pdev; |
4926 | hdev->ae_dev = ae_dev; | |
4ed340ab | 4927 | hdev->reset_type = HNAE3_NONE_RESET; |
ed4a1bb8 | 4928 | hdev->reset_request = 0; |
202f2014 | 4929 | hdev->reset_pending = 0; |
46a3df9f S |
4930 | ae_dev->priv = hdev; |
4931 | ||
46a3df9f S |
4932 | ret = hclge_pci_init(hdev); |
4933 | if (ret) { | |
4934 | dev_err(&pdev->dev, "PCI init failed\n"); | |
4935 | goto err_pci_init; | |
4936 | } | |
4937 | ||
3efb960f L |
4938 | /* Firmware command queue initialize */ |
4939 | ret = hclge_cmd_queue_init(hdev); | |
4940 | if (ret) { | |
4941 | dev_err(&pdev->dev, "Cmd queue init failed, ret = %d.\n", ret); | |
4942 | return ret; | |
4943 | } | |
4944 | ||
4945 | /* Firmware command initialize */ | |
46a3df9f S |
4946 | ret = hclge_cmd_init(hdev); |
4947 | if (ret) | |
4948 | goto err_cmd_init; | |
4949 | ||
4950 | ret = hclge_get_cap(hdev); | |
4951 | if (ret) { | |
e00e2197 CIK |
4952 | dev_err(&pdev->dev, "get hw capability error, ret = %d.\n", |
4953 | ret); | |
46a3df9f S |
4954 | return ret; |
4955 | } | |
4956 | ||
4957 | ret = hclge_configure(hdev); | |
4958 | if (ret) { | |
4959 | dev_err(&pdev->dev, "Configure dev error, ret = %d.\n", ret); | |
4960 | return ret; | |
4961 | } | |
4962 | ||
887c3820 | 4963 | ret = hclge_init_msi(hdev); |
46a3df9f | 4964 | if (ret) { |
887c3820 | 4965 | dev_err(&pdev->dev, "Init MSI/MSI-X error, ret = %d.\n", ret); |
46a3df9f S |
4966 | return ret; |
4967 | } | |
4968 | ||
466b0c00 L |
4969 | ret = hclge_misc_irq_init(hdev); |
4970 | if (ret) { | |
4971 | dev_err(&pdev->dev, | |
4972 | "Misc IRQ(vector0) init error, ret = %d.\n", | |
4973 | ret); | |
4974 | return ret; | |
4975 | } | |
4976 | ||
46a3df9f S |
4977 | ret = hclge_alloc_tqps(hdev); |
4978 | if (ret) { | |
4979 | dev_err(&pdev->dev, "Allocate TQPs error, ret = %d.\n", ret); | |
4980 | return ret; | |
4981 | } | |
4982 | ||
4983 | ret = hclge_alloc_vport(hdev); | |
4984 | if (ret) { | |
4985 | dev_err(&pdev->dev, "Allocate vport error, ret = %d.\n", ret); | |
4986 | return ret; | |
4987 | } | |
4988 | ||
7df7dad6 L |
4989 | ret = hclge_map_tqp(hdev); |
4990 | if (ret) { | |
4991 | dev_err(&pdev->dev, "Map tqp error, ret = %d.\n", ret); | |
4992 | return ret; | |
4993 | } | |
4994 | ||
cf9cca2d | 4995 | ret = hclge_mac_mdio_config(hdev); |
4996 | if (ret) { | |
4997 | dev_warn(&hdev->pdev->dev, | |
4998 | "mdio config fail ret=%d\n", ret); | |
4999 | return ret; | |
5000 | } | |
5001 | ||
46a3df9f S |
5002 | ret = hclge_mac_init(hdev); |
5003 | if (ret) { | |
5004 | dev_err(&pdev->dev, "Mac init error, ret = %d\n", ret); | |
5005 | return ret; | |
5006 | } | |
5007 | ret = hclge_buffer_alloc(hdev); | |
5008 | if (ret) { | |
5009 | dev_err(&pdev->dev, "Buffer allocate fail, ret =%d\n", ret); | |
5010 | return ret; | |
5011 | } | |
5012 | ||
5013 | ret = hclge_config_tso(hdev, HCLGE_TSO_MSS_MIN, HCLGE_TSO_MSS_MAX); | |
5014 | if (ret) { | |
5015 | dev_err(&pdev->dev, "Enable tso fail, ret =%d\n", ret); | |
5016 | return ret; | |
5017 | } | |
5018 | ||
46a3df9f S |
5019 | ret = hclge_init_vlan_config(hdev); |
5020 | if (ret) { | |
5021 | dev_err(&pdev->dev, "VLAN init fail, ret =%d\n", ret); | |
5022 | return ret; | |
5023 | } | |
5024 | ||
5025 | ret = hclge_tm_schd_init(hdev); | |
5026 | if (ret) { | |
5027 | dev_err(&pdev->dev, "tm schd init fail, ret =%d\n", ret); | |
5028 | return ret; | |
68ece54e YL |
5029 | } |
5030 | ||
5031 | ret = hclge_rss_init_hw(hdev); | |
5032 | if (ret) { | |
5033 | dev_err(&pdev->dev, "Rss init fail, ret =%d\n", ret); | |
5034 | return ret; | |
46a3df9f S |
5035 | } |
5036 | ||
cacde272 YL |
5037 | hclge_dcb_ops_set(hdev); |
5038 | ||
d039ef68 | 5039 | timer_setup(&hdev->service_timer, hclge_service_timer, 0); |
46a3df9f | 5040 | INIT_WORK(&hdev->service_task, hclge_service_task); |
ed4a1bb8 | 5041 | INIT_WORK(&hdev->rst_service_task, hclge_reset_service_task); |
22fd3468 | 5042 | INIT_WORK(&hdev->mbx_service_task, hclge_mailbox_service_task); |
46a3df9f | 5043 | |
466b0c00 L |
5044 | /* Enable MISC vector(vector0) */ |
5045 | hclge_enable_vector(&hdev->misc_vector, true); | |
5046 | ||
46a3df9f S |
5047 | set_bit(HCLGE_STATE_SERVICE_INITED, &hdev->state); |
5048 | set_bit(HCLGE_STATE_DOWN, &hdev->state); | |
ed4a1bb8 SM |
5049 | clear_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state); |
5050 | clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state); | |
22fd3468 SM |
5051 | clear_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state); |
5052 | clear_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state); | |
46a3df9f S |
5053 | |
5054 | pr_info("%s driver initialization finished.\n", HCLGE_DRIVER_NAME); | |
5055 | return 0; | |
5056 | ||
5057 | err_cmd_init: | |
5058 | pci_release_regions(pdev); | |
5059 | err_pci_init: | |
5060 | pci_set_drvdata(pdev, NULL); | |
5061 | err_hclge_dev: | |
5062 | return ret; | |
5063 | } | |
5064 | ||
c6dc5213 | 5065 | static void hclge_stats_clear(struct hclge_dev *hdev) |
5066 | { | |
5067 | memset(&hdev->hw_stats, 0, sizeof(hdev->hw_stats)); | |
5068 | } | |
5069 | ||
4ed340ab L |
5070 | static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev) |
5071 | { | |
5072 | struct hclge_dev *hdev = ae_dev->priv; | |
5073 | struct pci_dev *pdev = ae_dev->pdev; | |
5074 | int ret; | |
5075 | ||
5076 | set_bit(HCLGE_STATE_DOWN, &hdev->state); | |
5077 | ||
c6dc5213 | 5078 | hclge_stats_clear(hdev); |
5079 | ||
4ed340ab L |
5080 | ret = hclge_cmd_init(hdev); |
5081 | if (ret) { | |
5082 | dev_err(&pdev->dev, "Cmd queue init failed\n"); | |
5083 | return ret; | |
5084 | } | |
5085 | ||
5086 | ret = hclge_get_cap(hdev); | |
5087 | if (ret) { | |
5088 | dev_err(&pdev->dev, "get hw capability error, ret = %d.\n", | |
5089 | ret); | |
5090 | return ret; | |
5091 | } | |
5092 | ||
5093 | ret = hclge_configure(hdev); | |
5094 | if (ret) { | |
5095 | dev_err(&pdev->dev, "Configure dev error, ret = %d.\n", ret); | |
5096 | return ret; | |
5097 | } | |
5098 | ||
5099 | ret = hclge_map_tqp(hdev); | |
5100 | if (ret) { | |
5101 | dev_err(&pdev->dev, "Map tqp error, ret = %d.\n", ret); | |
5102 | return ret; | |
5103 | } | |
5104 | ||
5105 | ret = hclge_mac_init(hdev); | |
5106 | if (ret) { | |
5107 | dev_err(&pdev->dev, "Mac init error, ret = %d\n", ret); | |
5108 | return ret; | |
5109 | } | |
5110 | ||
5111 | ret = hclge_buffer_alloc(hdev); | |
5112 | if (ret) { | |
5113 | dev_err(&pdev->dev, "Buffer allocate fail, ret =%d\n", ret); | |
5114 | return ret; | |
5115 | } | |
5116 | ||
5117 | ret = hclge_config_tso(hdev, HCLGE_TSO_MSS_MIN, HCLGE_TSO_MSS_MAX); | |
5118 | if (ret) { | |
5119 | dev_err(&pdev->dev, "Enable tso fail, ret =%d\n", ret); | |
5120 | return ret; | |
5121 | } | |
5122 | ||
5123 | ret = hclge_init_vlan_config(hdev); | |
5124 | if (ret) { | |
5125 | dev_err(&pdev->dev, "VLAN init fail, ret =%d\n", ret); | |
5126 | return ret; | |
5127 | } | |
5128 | ||
5129 | ret = hclge_tm_schd_init(hdev); | |
5130 | if (ret) { | |
5131 | dev_err(&pdev->dev, "tm schd init fail, ret =%d\n", ret); | |
5132 | return ret; | |
5133 | } | |
5134 | ||
5135 | ret = hclge_rss_init_hw(hdev); | |
5136 | if (ret) { | |
5137 | dev_err(&pdev->dev, "Rss init fail, ret =%d\n", ret); | |
5138 | return ret; | |
5139 | } | |
5140 | ||
5141 | /* Enable MISC vector(vector0) */ | |
5142 | hclge_enable_vector(&hdev->misc_vector, true); | |
5143 | ||
5144 | dev_info(&pdev->dev, "Reset done, %s driver initialization finished.\n", | |
5145 | HCLGE_DRIVER_NAME); | |
5146 | ||
5147 | return 0; | |
5148 | } | |
5149 | ||
46a3df9f S |
5150 | static void hclge_uninit_ae_dev(struct hnae3_ae_dev *ae_dev) |
5151 | { | |
5152 | struct hclge_dev *hdev = ae_dev->priv; | |
5153 | struct hclge_mac *mac = &hdev->hw.mac; | |
5154 | ||
5155 | set_bit(HCLGE_STATE_DOWN, &hdev->state); | |
5156 | ||
2a32ca13 AB |
5157 | if (IS_ENABLED(CONFIG_PCI_IOV)) |
5158 | hclge_disable_sriov(hdev); | |
46a3df9f | 5159 | |
d039ef68 | 5160 | if (hdev->service_timer.function) |
46a3df9f S |
5161 | del_timer_sync(&hdev->service_timer); |
5162 | if (hdev->service_task.func) | |
5163 | cancel_work_sync(&hdev->service_task); | |
ed4a1bb8 SM |
5164 | if (hdev->rst_service_task.func) |
5165 | cancel_work_sync(&hdev->rst_service_task); | |
22fd3468 SM |
5166 | if (hdev->mbx_service_task.func) |
5167 | cancel_work_sync(&hdev->mbx_service_task); | |
46a3df9f S |
5168 | |
5169 | if (mac->phydev) | |
5170 | mdiobus_unregister(mac->mdio_bus); | |
5171 | ||
466b0c00 L |
5172 | /* Disable MISC vector(vector0) */ |
5173 | hclge_enable_vector(&hdev->misc_vector, false); | |
46a3df9f | 5174 | hclge_destroy_cmd_queue(&hdev->hw); |
202f2014 | 5175 | hclge_misc_irq_uninit(hdev); |
46a3df9f S |
5176 | hclge_pci_uninit(hdev); |
5177 | ae_dev->priv = NULL; | |
5178 | } | |
5179 | ||
4f645a90 PL |
5180 | static u32 hclge_get_max_channels(struct hnae3_handle *handle) |
5181 | { | |
5182 | struct hnae3_knic_private_info *kinfo = &handle->kinfo; | |
5183 | struct hclge_vport *vport = hclge_get_vport(handle); | |
5184 | struct hclge_dev *hdev = vport->back; | |
5185 | ||
5186 | return min_t(u32, hdev->rss_size_max * kinfo->num_tc, hdev->num_tqps); | |
5187 | } | |
5188 | ||
5189 | static void hclge_get_channels(struct hnae3_handle *handle, | |
5190 | struct ethtool_channels *ch) | |
5191 | { | |
5192 | struct hclge_vport *vport = hclge_get_vport(handle); | |
5193 | ||
5194 | ch->max_combined = hclge_get_max_channels(handle); | |
5195 | ch->other_count = 1; | |
5196 | ch->max_other = 1; | |
5197 | ch->combined_count = vport->alloc_tqps; | |
5198 | } | |
5199 | ||
f1f779ce PL |
5200 | static void hclge_get_tqps_and_rss_info(struct hnae3_handle *handle, |
5201 | u16 *free_tqps, u16 *max_rss_size) | |
5202 | { | |
5203 | struct hclge_vport *vport = hclge_get_vport(handle); | |
5204 | struct hclge_dev *hdev = vport->back; | |
5205 | u16 temp_tqps = 0; | |
5206 | int i; | |
5207 | ||
5208 | for (i = 0; i < hdev->num_tqps; i++) { | |
5209 | if (!hdev->htqp[i].alloced) | |
5210 | temp_tqps++; | |
5211 | } | |
5212 | *free_tqps = temp_tqps; | |
5213 | *max_rss_size = hdev->rss_size_max; | |
5214 | } | |
5215 | ||
5216 | static void hclge_release_tqp(struct hclge_vport *vport) | |
5217 | { | |
5218 | struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo; | |
5219 | struct hclge_dev *hdev = vport->back; | |
5220 | int i; | |
5221 | ||
5222 | for (i = 0; i < kinfo->num_tqps; i++) { | |
5223 | struct hclge_tqp *tqp = | |
5224 | container_of(kinfo->tqp[i], struct hclge_tqp, q); | |
5225 | ||
5226 | tqp->q.handle = NULL; | |
5227 | tqp->q.tqp_index = 0; | |
5228 | tqp->alloced = false; | |
5229 | } | |
5230 | ||
5231 | devm_kfree(&hdev->pdev->dev, kinfo->tqp); | |
5232 | kinfo->tqp = NULL; | |
5233 | } | |
5234 | ||
5235 | static int hclge_set_channels(struct hnae3_handle *handle, u32 new_tqps_num) | |
5236 | { | |
5237 | struct hclge_vport *vport = hclge_get_vport(handle); | |
5238 | struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo; | |
5239 | struct hclge_dev *hdev = vport->back; | |
5240 | int cur_rss_size = kinfo->rss_size; | |
5241 | int cur_tqps = kinfo->num_tqps; | |
5242 | u16 tc_offset[HCLGE_MAX_TC_NUM]; | |
5243 | u16 tc_valid[HCLGE_MAX_TC_NUM]; | |
5244 | u16 tc_size[HCLGE_MAX_TC_NUM]; | |
5245 | u16 roundup_size; | |
5246 | u32 *rss_indir; | |
5247 | int ret, i; | |
5248 | ||
5249 | hclge_release_tqp(vport); | |
5250 | ||
5251 | ret = hclge_knic_setup(vport, new_tqps_num); | |
5252 | if (ret) { | |
5253 | dev_err(&hdev->pdev->dev, "setup nic fail, ret =%d\n", ret); | |
5254 | return ret; | |
5255 | } | |
5256 | ||
5257 | ret = hclge_map_tqp_to_vport(hdev, vport); | |
5258 | if (ret) { | |
5259 | dev_err(&hdev->pdev->dev, "map vport tqp fail, ret =%d\n", ret); | |
5260 | return ret; | |
5261 | } | |
5262 | ||
5263 | ret = hclge_tm_schd_init(hdev); | |
5264 | if (ret) { | |
5265 | dev_err(&hdev->pdev->dev, "tm schd init fail, ret =%d\n", ret); | |
5266 | return ret; | |
5267 | } | |
5268 | ||
5269 | roundup_size = roundup_pow_of_two(kinfo->rss_size); | |
5270 | roundup_size = ilog2(roundup_size); | |
5271 | /* Set the RSS TC mode according to the new RSS size */ | |
5272 | for (i = 0; i < HCLGE_MAX_TC_NUM; i++) { | |
5273 | tc_valid[i] = 0; | |
5274 | ||
5275 | if (!(hdev->hw_tc_map & BIT(i))) | |
5276 | continue; | |
5277 | ||
5278 | tc_valid[i] = 1; | |
5279 | tc_size[i] = roundup_size; | |
5280 | tc_offset[i] = kinfo->rss_size * i; | |
5281 | } | |
5282 | ret = hclge_set_rss_tc_mode(hdev, tc_valid, tc_size, tc_offset); | |
5283 | if (ret) | |
5284 | return ret; | |
5285 | ||
5286 | /* Reinitializes the rss indirect table according to the new RSS size */ | |
5287 | rss_indir = kcalloc(HCLGE_RSS_IND_TBL_SIZE, sizeof(u32), GFP_KERNEL); | |
5288 | if (!rss_indir) | |
5289 | return -ENOMEM; | |
5290 | ||
5291 | for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++) | |
5292 | rss_indir[i] = i % kinfo->rss_size; | |
5293 | ||
5294 | ret = hclge_set_rss(handle, rss_indir, NULL, 0); | |
5295 | if (ret) | |
5296 | dev_err(&hdev->pdev->dev, "set rss indir table fail, ret=%d\n", | |
5297 | ret); | |
5298 | ||
5299 | kfree(rss_indir); | |
5300 | ||
5301 | if (!ret) | |
5302 | dev_info(&hdev->pdev->dev, | |
5303 | "Channels changed, rss_size from %d to %d, tqps from %d to %d", | |
5304 | cur_rss_size, kinfo->rss_size, | |
5305 | cur_tqps, kinfo->rss_size * kinfo->num_tc); | |
5306 | ||
5307 | return ret; | |
5308 | } | |
5309 | ||
46a3df9f S |
5310 | static const struct hnae3_ae_ops hclge_ops = { |
5311 | .init_ae_dev = hclge_init_ae_dev, | |
5312 | .uninit_ae_dev = hclge_uninit_ae_dev, | |
5313 | .init_client_instance = hclge_init_client_instance, | |
5314 | .uninit_client_instance = hclge_uninit_client_instance, | |
63d7e66f SM |
5315 | .map_ring_to_vector = hclge_map_ring_to_vector, |
5316 | .unmap_ring_from_vector = hclge_unmap_ring_frm_vector, | |
46a3df9f S |
5317 | .get_vector = hclge_get_vector, |
5318 | .set_promisc_mode = hclge_set_promisc_mode, | |
c39c4d98 | 5319 | .set_loopback = hclge_set_loopback, |
46a3df9f S |
5320 | .start = hclge_ae_start, |
5321 | .stop = hclge_ae_stop, | |
5322 | .get_status = hclge_get_status, | |
5323 | .get_ksettings_an_result = hclge_get_ksettings_an_result, | |
5324 | .update_speed_duplex_h = hclge_update_speed_duplex_h, | |
5325 | .cfg_mac_speed_dup_h = hclge_cfg_mac_speed_dup_h, | |
5326 | .get_media_type = hclge_get_media_type, | |
5327 | .get_rss_key_size = hclge_get_rss_key_size, | |
5328 | .get_rss_indir_size = hclge_get_rss_indir_size, | |
5329 | .get_rss = hclge_get_rss, | |
5330 | .set_rss = hclge_set_rss, | |
f7db940a | 5331 | .set_rss_tuple = hclge_set_rss_tuple, |
07d29954 | 5332 | .get_rss_tuple = hclge_get_rss_tuple, |
46a3df9f S |
5333 | .get_tc_size = hclge_get_tc_size, |
5334 | .get_mac_addr = hclge_get_mac_addr, | |
5335 | .set_mac_addr = hclge_set_mac_addr, | |
5336 | .add_uc_addr = hclge_add_uc_addr, | |
5337 | .rm_uc_addr = hclge_rm_uc_addr, | |
5338 | .add_mc_addr = hclge_add_mc_addr, | |
5339 | .rm_mc_addr = hclge_rm_mc_addr, | |
5340 | .set_autoneg = hclge_set_autoneg, | |
5341 | .get_autoneg = hclge_get_autoneg, | |
5342 | .get_pauseparam = hclge_get_pauseparam, | |
5343 | .set_mtu = hclge_set_mtu, | |
5344 | .reset_queue = hclge_reset_tqp, | |
5345 | .get_stats = hclge_get_stats, | |
5346 | .update_stats = hclge_update_stats, | |
5347 | .get_strings = hclge_get_strings, | |
5348 | .get_sset_count = hclge_get_sset_count, | |
5349 | .get_fw_version = hclge_get_fw_version, | |
5350 | .get_mdix_mode = hclge_get_mdix_mode, | |
5351 | .set_vlan_filter = hclge_set_port_vlan_filter, | |
5352 | .set_vf_vlan_filter = hclge_set_vf_vlan_filter, | |
5f9a7732 | 5353 | .enable_hw_strip_rxvtag = hclge_en_hw_strip_rxvtag, |
4ed340ab | 5354 | .reset_event = hclge_reset_event, |
f1f779ce PL |
5355 | .get_tqps_and_rss_info = hclge_get_tqps_and_rss_info, |
5356 | .set_channels = hclge_set_channels, | |
4f645a90 | 5357 | .get_channels = hclge_get_channels, |
46a3df9f S |
5358 | }; |
5359 | ||
5360 | static struct hnae3_ae_algo ae_algo = { | |
5361 | .ops = &hclge_ops, | |
5362 | .name = HCLGE_NAME, | |
5363 | .pdev_id_table = ae_algo_pci_tbl, | |
5364 | }; | |
5365 | ||
5366 | static int hclge_init(void) | |
5367 | { | |
5368 | pr_info("%s is initializing\n", HCLGE_NAME); | |
5369 | ||
5370 | return hnae3_register_ae_algo(&ae_algo); | |
5371 | } | |
5372 | ||
5373 | static void hclge_exit(void) | |
5374 | { | |
5375 | hnae3_unregister_ae_algo(&ae_algo); | |
5376 | } | |
5377 | module_init(hclge_init); | |
5378 | module_exit(hclge_exit); | |
5379 | ||
5380 | MODULE_LICENSE("GPL"); | |
5381 | MODULE_AUTHOR("Huawei Tech. Co., Ltd."); | |
5382 | MODULE_DESCRIPTION("HCLGE Driver"); | |
5383 | MODULE_VERSION(HCLGE_MOD_VERSION); |