]> git.proxmox.com Git - mirror_ubuntu-bionic-kernel.git/blob - drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h
net: hns3: Correct unreasonable code comments
[mirror_ubuntu-bionic-kernel.git] / drivers / net / ethernet / hisilicon / hns3 / hns3pf / hclge_main.h
1 /*
2 * Copyright (c) 2016~2017 Hisilicon Limited.
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; either version 2 of the License, or
7 * (at your option) any later version.
8 */
9
10 #ifndef __HCLGE_MAIN_H
11 #define __HCLGE_MAIN_H
12 #include <linux/fs.h>
13 #include <linux/types.h>
14 #include <linux/phy.h>
15 #include <linux/if_vlan.h>
16
17 #include "hclge_cmd.h"
18 #include "hnae3.h"
19
#define HCLGE_MOD_VERSION "1.0"
#define HCLGE_DRIVER_NAME "hclge"

/* Sentinel used where no valid vport id can be resolved */
#define HCLGE_INVALID_VPORT 0xffff

/* RoCE vectors start at this offset within the PF's vector space */
#define HCLGE_ROCE_VECTOR_OFFSET 96

/* PF configuration is fetched in 32-byte blocks */
#define HCLGE_PF_CFG_BLOCK_SIZE 32
#define HCLGE_PF_CFG_DESC_NUM \
	(HCLGE_PF_CFG_BLOCK_SIZE / HCLGE_CFG_RD_LEN_BYTES)

/* Interrupt vector control register offsets (relative to hw io_base) */
#define HCLGE_VECTOR_REG_BASE 0x20000
#define HCLGE_MISC_VECTOR_REG_BASE 0x20400

#define HCLGE_VECTOR_REG_OFFSET 0x4
#define HCLGE_VECTOR_VF_OFFSET 0x100000

/* RSS: indirection table entries, hash key size (bytes), hash algorithms */
#define HCLGE_RSS_IND_TBL_SIZE 512
#define HCLGE_RSS_SET_BITMAP_MSK GENMASK(15, 0)
#define HCLGE_RSS_KEY_SIZE 40
#define HCLGE_RSS_HASH_ALGO_TOEPLITZ 0
#define HCLGE_RSS_HASH_ALGO_SIMPLE 1
#define HCLGE_RSS_HASH_ALGO_SYMMETRIC 2
#define HCLGE_RSS_HASH_ALGO_MASK 0xf
#define HCLGE_RSS_CFG_TBL_NUM \
	(HCLGE_RSS_IND_TBL_SIZE / HCLGE_RSS_CFG_TBL_SIZE)

/* RSS input tuple field-selection bits */
#define HCLGE_RSS_INPUT_TUPLE_OTHER GENMASK(3, 0)
#define HCLGE_RSS_INPUT_TUPLE_SCTP GENMASK(4, 0)
#define HCLGE_D_PORT_BIT BIT(0)
#define HCLGE_S_PORT_BIT BIT(1)
#define HCLGE_D_IP_BIT BIT(2)
#define HCLGE_S_IP_BIT BIT(3)
#define HCLGE_V_TAG_BIT BIT(4)

/* Supported per-TC RSS sizes (powers of two) */
#define HCLGE_RSS_TC_SIZE_0 1
#define HCLGE_RSS_TC_SIZE_1 2
#define HCLGE_RSS_TC_SIZE_2 4
#define HCLGE_RSS_TC_SIZE_3 8
#define HCLGE_RSS_TC_SIZE_4 16
#define HCLGE_RSS_TC_SIZE_5 32
#define HCLGE_RSS_TC_SIZE_6 64
#define HCLGE_RSS_TC_SIZE_7 128

/* Multicast table (MTA) entries; 4096 = 2^12 address bits used as index */
#define HCLGE_MTA_TBL_SIZE 4096

#define HCLGE_TQP_RESET_TRY_TIMES 10

/* PHY page numbers, selected through HCLGE_PHY_PAGE_REG */
#define HCLGE_PHY_PAGE_MDIX 0
#define HCLGE_PHY_PAGE_COPPER 0

/* Page Selection Reg. */
#define HCLGE_PHY_PAGE_REG 22

/* Copper Specific Control Register */
#define HCLGE_PHY_CSC_REG 16

/* Copper Specific Status Register */
#define HCLGE_PHY_CSS_REG 17

/* MDI-X control field (shift and mask) within the CSC register */
#define HCLGE_PHY_MDIX_CTRL_S 5
#define HCLGE_PHY_MDIX_CTRL_M GENMASK(6, 5)

/* Status bits within the CSS register */
#define HCLGE_PHY_MDIX_STATUS_B 6
#define HCLGE_PHY_SPEED_DUP_RESOLVE_B 11

/* Factor used to calculate offset and bitmap of VF num */
#define HCLGE_VF_NUM_PER_CMD 64
#define HCLGE_VF_NUM_PER_BYTE 8

/* Reset related Registers */
#define HCLGE_MISC_RESET_STS_REG 0x20700
#define HCLGE_MISC_VECTOR_INT_STS 0x20800
#define HCLGE_GLOBAL_RESET_REG 0x20A00
#define HCLGE_GLOBAL_RESET_BIT 0x0
#define HCLGE_CORE_RESET_BIT 0x1
#define HCLGE_FUN_RST_ING 0x20C00
#define HCLGE_FUN_RST_ING_B 0

/* Vector0 register bits define */
#define HCLGE_VECTOR0_GLOBALRESET_INT_B 5
#define HCLGE_VECTOR0_CORERESET_INT_B 6
#define HCLGE_VECTOR0_IMPRESET_INT_B 7

/* Vector0 interrupt CMDQ event source register(RW) */
#define HCLGE_VECTOR0_CMDQ_SRC_REG 0x27100
/* CMDQ register bits for RX event(=MBX event) */
#define HCLGE_VECTOR0_RX_CMDQ_INT_B 1

/* Default frame: standard MTU payload + L2 header + FCS + one VLAN tag */
#define HCLGE_MAC_DEFAULT_FRAME \
	(ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN + ETH_DATA_LEN)
#define HCLGE_MAC_MIN_FRAME 64
#define HCLGE_MAC_MAX_FRAME 9728

/* Link-speed ability bits (see hclge_cfg.speed_ability) */
#define HCLGE_SUPPORT_1G_BIT BIT(0)
#define HCLGE_SUPPORT_10G_BIT BIT(1)
#define HCLGE_SUPPORT_25G_BIT BIT(2)
#define HCLGE_SUPPORT_50G_BIT BIT(3)
#define HCLGE_SUPPORT_100G_BIT BIT(4)
119
/* Bit numbers for the hclge_dev.state bitmap (state is an unsigned long,
 * so these are intended for the set_bit()/test_bit() family —
 * NOTE(review): confirm usage in hclge_main.c).
 */
enum HCLGE_DEV_STATE {
	HCLGE_STATE_REINITING,
	HCLGE_STATE_DOWN,
	HCLGE_STATE_DISABLED,
	HCLGE_STATE_REMOVING,
	HCLGE_STATE_SERVICE_INITED,
	HCLGE_STATE_SERVICE_SCHED,
	HCLGE_STATE_RST_SERVICE_SCHED,
	HCLGE_STATE_RST_HANDLING,
	HCLGE_STATE_MBX_SERVICE_SCHED,
	HCLGE_STATE_MBX_HANDLING,
	HCLGE_STATE_STATISTICS_UPDATING,
	HCLGE_STATE_CMD_DISABLE,
	HCLGE_STATE_MAX
};

/* Classification of the cause of a misc (vector 0) interrupt */
enum hclge_evt_cause {
	HCLGE_VECTOR0_EVENT_RST,	/* reset event */
	HCLGE_VECTOR0_EVENT_MBX,	/* mailbox (CMDQ RX) event */
	HCLGE_VECTOR0_EVENT_OTHER,
};
141
142 #define HCLGE_MPF_ENBALE 1
143 struct hclge_caps {
144 u16 num_tqp;
145 u16 num_buffer_cell;
146 u32 flag;
147 u16 vmdq;
148 };
149
/* Link speeds, encoded directly as their value in Mbps */
enum HCLGE_MAC_SPEED {
	HCLGE_MAC_SPEED_10M = 10,	/* 10 Mbps */
	HCLGE_MAC_SPEED_100M = 100,	/* 100 Mbps */
	HCLGE_MAC_SPEED_1G = 1000,	/* 1000 Mbps = 1 Gbps */
	HCLGE_MAC_SPEED_10G = 10000,	/* 10000 Mbps = 10 Gbps */
	HCLGE_MAC_SPEED_25G = 25000,	/* 25000 Mbps = 25 Gbps */
	HCLGE_MAC_SPEED_40G = 40000,	/* 40000 Mbps = 40 Gbps */
	HCLGE_MAC_SPEED_50G = 50000,	/* 50000 Mbps = 50 Gbps */
	HCLGE_MAC_SPEED_100G = 100000	/* 100000 Mbps = 100 Gbps */
};

/* MAC duplex mode */
enum HCLGE_MAC_DUPLEX {
	HCLGE_MAC_HALF,
	HCLGE_MAC_FULL
};

/* Which bit-range of the destination MAC address indexes the MTA
 * (multicast table) — presumably 12 bits each, matching
 * HCLGE_MTA_TBL_SIZE == 4096; e.g. HCLGE_MAC_ADDR_47_36 = bits 47..36.
 */
enum hclge_mta_dmac_sel_type {
	HCLGE_MAC_ADDR_47_36,
	HCLGE_MAC_ADDR_46_35,
	HCLGE_MAC_ADDR_45_34,
	HCLGE_MAC_ADDR_44_33,
};
172
/* MAC-layer state for the PF, including the attached PHY (if any) */
struct hclge_mac {
	u8 phy_addr;		/* MDIO address of the PHY */
	u8 flag;
	u8 media_type;
	u8 mac_addr[ETH_ALEN];
	u8 autoneg;
	u8 duplex;		/* see enum HCLGE_MAC_DUPLEX */
	u32 speed;		/* in Mbps, see enum HCLGE_MAC_SPEED */
	int link; /* store the link status of mac & phy (if phy exists) */
	struct phy_device *phydev;
	struct mii_bus *mdio_bus;
	phy_interface_t phy_if;
	__ETHTOOL_DECLARE_LINK_MODE_MASK(supported);
	__ETHTOOL_DECLARE_LINK_MODE_MASK(advertising);
};

/* Hardware access context: mapped registers, MAC, command queue, caps */
struct hclge_hw {
	void __iomem *io_base;	/* mapped register space */
	struct hclge_mac mac;
	int num_vec;		/* number of interrupt vectors */
	struct hclge_cmq cmq;	/* command queue to firmware */
	struct hclge_caps caps;
};
196
/* TQP stats
 * NOTE(review): the struct tag is misspelled ("hlcge"); kept because it
 * is referenced by this name elsewhere in the driver.
 */
struct hlcge_tqp_stats {
	/* query_tqp_tx_queue_statistics ,opcode id: 0x0B03 */
	u64 rcb_tx_ring_pktnum_rcd; /* 32bit counter in hardware */
	/* query_tqp_rx_queue_statistics ,opcode id: 0x0B13 */
	u64 rcb_rx_ring_pktnum_rcd; /* 32bit counter in hardware */
};

/* One task queue pair (Tx/Rx queue) owned by this PF */
struct hclge_tqp {
	/* copy of device pointer from pci_dev,
	 * used when perform DMA mapping
	 */
	struct device *dev;
	struct hnae3_queue q;		/* hnae3-layer queue handle */
	struct hlcge_tqp_stats tqp_stats;
	u16 index; /* Global index in a NIC controller */

	bool alloced;			/* true once assigned to a vport */
};

/* Flow-control (pause) operating modes */
enum hclge_fc_mode {
	HCLGE_FC_NONE,
	HCLGE_FC_RX_PAUSE,
	HCLGE_FC_TX_PAUSE,
	HCLGE_FC_FULL,
	HCLGE_FC_PFC,		/* priority flow control */
	HCLGE_FC_DEFAULT
};
225
/* Traffic-manager priority groups and scheduling modes */
#define HCLGE_PG_NUM 4
#define HCLGE_SCH_MODE_SP 0	/* strict priority */
#define HCLGE_SCH_MODE_DWRR 1	/* deficit weighted round robin */

/* Per priority-group scheduling configuration */
struct hclge_pg_info {
	u8 pg_id;
	u8 pg_sch_mode;		/* 0: sp; 1: dwrr */
	u8 tc_bit_map;		/* TCs that belong to this group */
	u32 bw_limit;
	u8 tc_dwrr[HNAE3_MAX_TC];	/* DWRR weight per TC */
};

/* Per traffic-class scheduling configuration */
struct hclge_tc_info {
	u8 tc_id;
	u8 tc_sch_mode;		/* 0: sp; 1: dwrr */
	u8 pgid;		/* owning priority group */
	u32 bw_limit;
};

/* Static configuration read from the device at probe time */
struct hclge_cfg {
	u8 vmdq_vport_num;
	u8 tc_num;
	u16 tqp_desc_num;
	u16 rx_buf_len;
	u16 rss_size_max;
	u8 phy_addr;
	u8 media_type;
	u8 mac_addr[ETH_ALEN];
	u8 default_speed;
	u32 numa_node_map;
	u8 speed_ability;	/* HCLGE_SUPPORT_*_BIT flags */
};

/* Aggregate traffic-manager state for the PF */
struct hclge_tm_info {
	u8 num_tc;
	u8 num_pg;	/* It must be 1 if vNET-Base schd */
	u8 pg_dwrr[HCLGE_PG_NUM];
	u8 prio_tc[HNAE3_MAX_USER_PRIO];	/* user prio -> TC map */
	struct hclge_pg_info pg_info[HCLGE_PG_NUM];
	struct hclge_tc_info tc_info[HNAE3_MAX_TC];
	enum hclge_fc_mode fc_mode;
	u8 hw_pfc_map; /* Allow for packet drop or not on this TC */
};

/* Maps an ethtool stats string to the offset of its counter field */
struct hclge_comm_stats_str {
	char desc[ETH_GSTRING_LEN];
	unsigned long offset;
};
273
/* all 64bit stats, opcode id: 0x0030
 * Field order mirrors the firmware query response — do not reorder.
 */
struct hclge_64_bit_stats {
	/* query_igu_stat */
	u64 igu_rx_oversize_pkt;
	u64 igu_rx_undersize_pkt;
	u64 igu_rx_out_all_pkt;
	u64 igu_rx_uni_pkt;
	u64 igu_rx_multi_pkt;
	u64 igu_rx_broad_pkt;
	u64 rsv0;		/* reserved slot in the response */

	/* query_egu_stat */
	u64 egu_tx_out_all_pkt;
	u64 egu_tx_uni_pkt;
	u64 egu_tx_multi_pkt;
	u64 egu_tx_broad_pkt;

	/* ssu_ppp packet stats */
	u64 ssu_ppp_mac_key_num;
	u64 ssu_ppp_host_key_num;
	u64 ppp_ssu_mac_rlt_num;
	u64 ppp_ssu_host_rlt_num;

	/* ssu_tx_in_out_dfx_stats */
	u64 ssu_tx_in_num;
	u64 ssu_tx_out_num;
	/* ssu_rx_in_out_dfx_stats */
	u64 ssu_rx_in_num;
	u64 ssu_rx_out_num;
};
304
/* all 32bit stats, opcode id: 0x0031
 * Counters are 32-bit in hardware but held as u64 here. Field order
 * mirrors the firmware query response — do not reorder. Misspelled
 * field names ("invaild", "roc_") are kept: they are presumably matched
 * by name in the stats string tables — verify before renaming.
 */
struct hclge_32_bit_stats {
	u64 igu_rx_err_pkt;
	u64 igu_rx_no_eof_pkt;
	u64 igu_rx_no_sof_pkt;
	u64 egu_tx_1588_pkt;
	u64 egu_tx_err_pkt;
	u64 ssu_full_drop_num;
	u64 ssu_part_drop_num;
	u64 ppp_key_drop_num;
	u64 ppp_rlt_drop_num;
	u64 ssu_key_drop_num;
	u64 pkt_curr_buf_cnt;
	u64 qcn_fb_rcv_cnt;
	u64 qcn_fb_drop_cnt;
	u64 qcn_fb_invaild_cnt;	/* sic */
	u64 rsv0;		/* reserved slot in the response */

	/* Rx packet level statistics, per TC, in/out */
	u64 rx_packet_tc0_in_cnt;
	u64 rx_packet_tc1_in_cnt;
	u64 rx_packet_tc2_in_cnt;
	u64 rx_packet_tc3_in_cnt;
	u64 rx_packet_tc4_in_cnt;
	u64 rx_packet_tc5_in_cnt;
	u64 rx_packet_tc6_in_cnt;
	u64 rx_packet_tc7_in_cnt;
	u64 rx_packet_tc0_out_cnt;
	u64 rx_packet_tc1_out_cnt;
	u64 rx_packet_tc2_out_cnt;
	u64 rx_packet_tc3_out_cnt;
	u64 rx_packet_tc4_out_cnt;
	u64 rx_packet_tc5_out_cnt;
	u64 rx_packet_tc6_out_cnt;
	u64 rx_packet_tc7_out_cnt;

	/* Tx packet level statistics */
	u64 tx_packet_tc0_in_cnt;
	u64 tx_packet_tc1_in_cnt;
	u64 tx_packet_tc2_in_cnt;
	u64 tx_packet_tc3_in_cnt;
	u64 tx_packet_tc4_in_cnt;
	u64 tx_packet_tc5_in_cnt;
	u64 tx_packet_tc6_in_cnt;
	u64 tx_packet_tc7_in_cnt;
	u64 tx_packet_tc0_out_cnt;
	u64 tx_packet_tc1_out_cnt;
	u64 tx_packet_tc2_out_cnt;
	u64 tx_packet_tc3_out_cnt;
	u64 tx_packet_tc4_out_cnt;
	u64 tx_packet_tc5_out_cnt;
	u64 tx_packet_tc6_out_cnt;
	u64 tx_packet_tc7_out_cnt;

	/* packet buffer statistics */
	u64 pkt_curr_buf_tc0_cnt;
	u64 pkt_curr_buf_tc1_cnt;
	u64 pkt_curr_buf_tc2_cnt;
	u64 pkt_curr_buf_tc3_cnt;
	u64 pkt_curr_buf_tc4_cnt;
	u64 pkt_curr_buf_tc5_cnt;
	u64 pkt_curr_buf_tc6_cnt;
	u64 pkt_curr_buf_tc7_cnt;

	u64 mb_uncopy_num;
	u64 lo_pri_unicast_rlt_drop_num;
	u64 hi_pri_multicast_rlt_drop_num;
	u64 lo_pri_multicast_rlt_drop_num;
	u64 rx_oq_drop_pkt_cnt;
	u64 tx_oq_drop_pkt_cnt;
	u64 nic_l2_err_drop_pkt_cnt;
	u64 roc_l2_err_drop_pkt_cnt;	/* sic: presumably "roce" */
};
376
/* mac stats ,opcode id: 0x0032
 * Field order mirrors the firmware query response — do not reorder.
 * The rsv0/rsv1 slots are reserved positions in that response.
 */
struct hclge_mac_stats {
	u64 mac_tx_mac_pause_num;
	u64 mac_rx_mac_pause_num;
	/* PFC pause frames per priority, Tx then Rx */
	u64 mac_tx_pfc_pri0_pkt_num;
	u64 mac_tx_pfc_pri1_pkt_num;
	u64 mac_tx_pfc_pri2_pkt_num;
	u64 mac_tx_pfc_pri3_pkt_num;
	u64 mac_tx_pfc_pri4_pkt_num;
	u64 mac_tx_pfc_pri5_pkt_num;
	u64 mac_tx_pfc_pri6_pkt_num;
	u64 mac_tx_pfc_pri7_pkt_num;
	u64 mac_rx_pfc_pri0_pkt_num;
	u64 mac_rx_pfc_pri1_pkt_num;
	u64 mac_rx_pfc_pri2_pkt_num;
	u64 mac_rx_pfc_pri3_pkt_num;
	u64 mac_rx_pfc_pri4_pkt_num;
	u64 mac_rx_pfc_pri5_pkt_num;
	u64 mac_rx_pfc_pri6_pkt_num;
	u64 mac_rx_pfc_pri7_pkt_num;
	/* Tx totals and size-histogram counters */
	u64 mac_tx_total_pkt_num;
	u64 mac_tx_total_oct_num;
	u64 mac_tx_good_pkt_num;
	u64 mac_tx_bad_pkt_num;
	u64 mac_tx_good_oct_num;
	u64 mac_tx_bad_oct_num;
	u64 mac_tx_uni_pkt_num;
	u64 mac_tx_multi_pkt_num;
	u64 mac_tx_broad_pkt_num;
	u64 mac_tx_undersize_pkt_num;
	u64 mac_tx_oversize_pkt_num;
	u64 mac_tx_64_oct_pkt_num;
	u64 mac_tx_65_127_oct_pkt_num;
	u64 mac_tx_128_255_oct_pkt_num;
	u64 mac_tx_256_511_oct_pkt_num;
	u64 mac_tx_512_1023_oct_pkt_num;
	u64 mac_tx_1024_1518_oct_pkt_num;
	u64 mac_tx_1519_2047_oct_pkt_num;
	u64 mac_tx_2048_4095_oct_pkt_num;
	u64 mac_tx_4096_8191_oct_pkt_num;
	u64 rsv0;
	u64 mac_tx_8192_9216_oct_pkt_num;
	u64 mac_tx_9217_12287_oct_pkt_num;
	u64 mac_tx_12288_16383_oct_pkt_num;
	u64 mac_tx_1519_max_good_oct_pkt_num;
	u64 mac_tx_1519_max_bad_oct_pkt_num;

	/* Rx totals and size-histogram counters */
	u64 mac_rx_total_pkt_num;
	u64 mac_rx_total_oct_num;
	u64 mac_rx_good_pkt_num;
	u64 mac_rx_bad_pkt_num;
	u64 mac_rx_good_oct_num;
	u64 mac_rx_bad_oct_num;
	u64 mac_rx_uni_pkt_num;
	u64 mac_rx_multi_pkt_num;
	u64 mac_rx_broad_pkt_num;
	u64 mac_rx_undersize_pkt_num;
	u64 mac_rx_oversize_pkt_num;
	u64 mac_rx_64_oct_pkt_num;
	u64 mac_rx_65_127_oct_pkt_num;
	u64 mac_rx_128_255_oct_pkt_num;
	u64 mac_rx_256_511_oct_pkt_num;
	u64 mac_rx_512_1023_oct_pkt_num;
	u64 mac_rx_1024_1518_oct_pkt_num;
	u64 mac_rx_1519_2047_oct_pkt_num;
	u64 mac_rx_2048_4095_oct_pkt_num;
	u64 mac_rx_4096_8191_oct_pkt_num;
	u64 rsv1;
	u64 mac_rx_8192_9216_oct_pkt_num;
	u64 mac_rx_9217_12287_oct_pkt_num;
	u64 mac_rx_12288_16383_oct_pkt_num;
	u64 mac_rx_1519_max_good_oct_pkt_num;
	u64 mac_rx_1519_max_bad_oct_pkt_num;

	/* Error/exception counters */
	u64 mac_tx_fragment_pkt_num;
	u64 mac_tx_undermin_pkt_num;
	u64 mac_tx_jabber_pkt_num;
	u64 mac_tx_err_all_pkt_num;
	u64 mac_tx_from_app_good_pkt_num;
	u64 mac_tx_from_app_bad_pkt_num;
	u64 mac_rx_fragment_pkt_num;
	u64 mac_rx_undermin_pkt_num;
	u64 mac_rx_jabber_pkt_num;
	u64 mac_rx_fcs_err_pkt_num;
	u64 mac_rx_send_app_good_pkt_num;
	u64 mac_rx_send_app_bad_pkt_num;
};
464
/* Stats refresh throttle; 60 * 5 — presumably service-task ticks
 * amounting to ~5 minutes, confirm against the service timer period.
 */
#define HCLGE_STATS_TIMER_INTERVAL (60 * 5)

/* All hardware statistics gathered for this PF */
struct hclge_hw_stats {
	struct hclge_mac_stats mac_stats;
	struct hclge_64_bit_stats all_64_bit_stats;
	struct hclge_32_bit_stats all_32_bit_stats;
	u32 stats_timer;	/* countdown until next refresh */
};

/* Ethertype / TPID configuration for outer ("ot") and inner ("in")
 * VLAN tags in each direction.
 */
struct hclge_vlan_type_cfg {
	u16 rx_ot_fst_vlan_type;
	u16 rx_ot_sec_vlan_type;
	u16 rx_in_fst_vlan_type;
	u16 rx_in_sec_vlan_type;
	u16 tx_ot_vlan_type;
	u16 tx_in_vlan_type;
};
481
#define HCLGE_VPORT_NUM 256

/* Main per-PF driver context, allocated at probe time */
struct hclge_dev {
	struct pci_dev *pdev;
	struct hnae3_ae_dev *ae_dev;
	struct hclge_hw hw;
	struct hclge_misc_vector misc_vector;
	struct hclge_hw_stats hw_stats;
	unsigned long state;		/* HCLGE_STATE_* bitmap */

	enum hnae3_reset_type reset_type;
	unsigned long reset_request;	/* reset has been requested */
	unsigned long reset_pending;	/* client rst is pending to be served */
	u32 fw_version;
	u16 num_vmdq_vport;		/* Num vmdq vport this PF has set up */
	u16 num_tqps;			/* Num task queue pairs of this PF */
	u16 num_req_vfs;		/* Num VFs requested for this PF */

	u16 base_tqp_pid;	/* Base task tqp physical id of this PF */
	u16 alloc_rss_size;	/* Allocated RSS task queue */
	u16 rss_size_max;	/* HW defined max RSS task queue */

	u16 fdir_pf_filter_count; /* Num of guaranteed filters for this PF */
	u16 num_alloc_vport;		/* Num vports this driver supports */
	u32 numa_node_mask;
	u16 rx_buf_len;
	u16 num_desc;
	u8 hw_tc_map;
	u8 tc_num_last_time;
	enum hclge_fc_mode fc_mode_last_time;

/* Transmit scheduling mode (tx_sch_mode values) */
#define HCLGE_FLAG_TC_BASE_SCH_MODE		1
#define HCLGE_FLAG_VNET_BASE_SCH_MODE		2
	u8 tx_sch_mode;
	u8 tc_max;
	u8 pfc_max;

	u8 default_up;
	u8 dcbx_cap;
	struct hclge_tm_info tm_info;

	/* MSI/MSI-X vector bookkeeping */
	u16 num_msi;
	u16 num_msi_left;
	u16 num_msi_used;
	u32 base_msi_vector;
	u16 *vector_status;
	int *vector_irq;
	u16 num_roce_msi;	/* Num of roce vectors for this PF */
	int roce_base_vector;

	u16 pending_udp_bitmap;

	u16 rx_itr_default;	/* default interrupt coalescing setting */
	u16 tx_itr_default;

	u16 adminq_work_limit; /* Num of admin receive queue desc to process */
	unsigned long service_timer_period;
	unsigned long service_timer_previous;
	struct timer_list service_timer;
	struct work_struct service_task;
	struct work_struct rst_service_task;
	struct work_struct mbx_service_task;

	bool cur_promisc;	/* current promiscuous-mode state */
	int num_alloc_vfs;	/* Actual number of VFs allocated */

	struct hclge_tqp *htqp;		/* array of num_tqps entries */
	struct hclge_vport *vport;	/* array of num_alloc_vport entries */

	struct dentry *hclge_dbgfs;	/* debugfs root for this device */

	struct hnae3_client *nic_client;
	struct hnae3_client *roce_client;

/* Bits for the flag field below */
#define HCLGE_FLAG_MAIN			BIT(0)
#define HCLGE_FLAG_DCB_CAPABLE		BIT(1)
#define HCLGE_FLAG_DCB_ENABLE		BIT(2)
#define HCLGE_FLAG_MQPRIO_ENABLE	BIT(3)
	u32 flag;

	u32 pkt_buf_size; /* Total pf buf size for tx/rx */
	u32 mps; /* Max packet size */

	enum hclge_mta_dmac_sel_type mta_mac_sel_type;
	bool enable_mta; /* Multicast filter enable */

	struct hclge_vlan_type_cfg vlan_type_cfg;

	/* per-VLAN-id bitmap of which vports have that VLAN configured */
	unsigned long vlan_table[VLAN_N_VID][BITS_TO_LONGS(HCLGE_VPORT_NUM)];
};
571
/* VPort level vlan tag configuration for TX direction */
struct hclge_tx_vtag_cfg {
	bool accept_tag1;	/* Whether accept tag1 packet from host */
	bool accept_untag1;	/* Whether accept untag1 packet from host */
	bool accept_tag2;	/* Whether accept tag2 packet from host */
	bool accept_untag2;	/* Whether accept untag2 packet from host */
	bool insert_tag1_en;	/* Whether insert inner vlan tag */
	bool insert_tag2_en;	/* Whether insert outer vlan tag */
	u16 default_tag1;	/* The default inner vlan tag to insert */
	u16 default_tag2;	/* The default outer vlan tag to insert */
};

/* VPort level vlan tag configuration for RX direction */
struct hclge_rx_vtag_cfg {
	bool strip_tag1_en;	/* Whether strip inner vlan tag */
	bool strip_tag2_en;	/* Whether strip outer vlan tag */
	bool vlan1_vlan_prionly;/* Inner VLAN Tag up to descriptor Enable */
	bool vlan2_vlan_prionly;/* Outer VLAN Tag up to descriptor Enable */
};

/* Per-flow-type RSS tuple enable flags (HCLGE_*_BIT field selections) */
struct hclge_rss_tuple_cfg {
	u8 ipv4_tcp_en;
	u8 ipv4_udp_en;
	u8 ipv4_sctp_en;
	u8 ipv4_fragment_en;
	u8 ipv6_tcp_en;
	u8 ipv6_udp_en;
	u8 ipv6_sctp_en;
	u8 ipv6_fragment_en;
};
602
/* One virtual port (the PF itself or a VF) and its configuration */
struct hclge_vport {
	u16 alloc_tqps;	/* Allocated Tx/Rx queues */

	u8  rss_hash_key[HCLGE_RSS_KEY_SIZE]; /* User configured hash keys */
	/* User configured lookup table entries */
	u8  rss_indirection_tbl[HCLGE_RSS_IND_TBL_SIZE];
	int rss_algo;		/* User configured hash algorithm */
	/* User configured rss tuple sets */
	struct hclge_rss_tuple_cfg rss_tuple_sets;

	u16 alloc_rss_size;

	u16 qs_offset;		/* queue-set offset in the scheduler */
	u16 bw_limit;		/* VSI BW Limit (0 = disabled) */
	u8  dwrr;		/* DWRR scheduling weight */

	struct hclge_tx_vtag_cfg  txvlan_cfg;
	struct hclge_rx_vtag_cfg  rxvlan_cfg;

	int vport_id;
	struct hclge_dev *back;  /* Back reference to associated dev */
	struct hnae3_handle nic;
	struct hnae3_handle roce;

	bool accept_mta_mc; /* whether to accept mta filter multicast */
	/* shadow copy of the hardware multicast table entries */
	unsigned long mta_shadow[BITS_TO_LONGS(HCLGE_MTA_TBL_SIZE)];
};
630
/* Fill a promiscuous-mode parameter block for the given vport */
void hclge_promisc_param_init(struct hclge_promisc_param *param, bool en_uc,
			      bool en_mc, bool en_bc, int vport_id);

/* Per-vport unicast/multicast MAC address table management */
int hclge_add_uc_addr_common(struct hclge_vport *vport,
			     const unsigned char *addr);
int hclge_rm_uc_addr_common(struct hclge_vport *vport,
			    const unsigned char *addr);
int hclge_add_mc_addr_common(struct hclge_vport *vport,
			     const unsigned char *addr);
int hclge_rm_mc_addr_common(struct hclge_vport *vport,
			    const unsigned char *addr);

/* Multicast table (MTA) filter configuration and status sync */
int hclge_cfg_func_mta_filter(struct hclge_dev *hdev,
			      u8 func_id,
			      bool enable);
int hclge_update_mta_status_common(struct hclge_vport *vport,
				   unsigned long *status,
				   u16 idx,
				   u16 count,
				   bool update_filter);

/* Resolve the vport that owns an hnae3 handle */
struct hclge_vport *hclge_get_vport(struct hnae3_handle *handle);
/* Map (en=true) or unmap (en=false) a ring chain onto an interrupt vector */
int hclge_bind_ring_with_vector(struct hclge_vport *vport,
				int vector_id, bool en,
				struct hnae3_ring_chain_node *ring_chain);
656
657 static inline int hclge_get_queue_id(struct hnae3_queue *queue)
658 {
659 struct hclge_tqp *tqp = container_of(queue, struct hclge_tqp, q);
660
661 return tqp->index;
662 }
663
/* MAC speed/duplex and VLAN filter configuration */
int hclge_cfg_mac_speed_dup(struct hclge_dev *hdev, int speed, u8 duplex);
int hclge_set_vlan_filter(struct hnae3_handle *handle, __be16 proto,
			  u16 vlan_id, bool is_kill);
int hclge_en_hw_strip_rxvtag(struct hnae3_handle *handle, bool enable);

/* Packet buffer and RSS hardware initialisation */
int hclge_buffer_alloc(struct hclge_dev *hdev);
int hclge_rss_init_hw(struct hclge_dev *hdev);
void hclge_rss_indir_init_cfg(struct hclge_dev *hdev);

/* Mailbox servicing, queue reset and flow control */
void hclge_mbx_handler(struct hclge_dev *hdev);
void hclge_reset_tqp(struct hnae3_handle *handle, u16 queue_id);
void hclge_reset_vf_queue(struct hclge_vport *vport, u16 queue_id);
int hclge_cfg_flowctrl(struct hclge_dev *hdev);
int hclge_func_reset_cmd(struct hclge_dev *hdev, int func_id);
#endif
678 #endif