1 // SPDX-License-Identifier: GPL-2.0+
2 // Copyright (c) 2016-2017 Hisilicon Limited.
7 #include <linux/types.h>
9 #include <linux/if_vlan.h>
11 #include "hclge_cmd.h"
/* Module name/version strings reported by the driver. */
14 #define HCLGE_MOD_VERSION "1.0"
15 #define HCLGE_DRIVER_NAME "hclge"
/* Maximum number of physical functions sharing the device. */
17 #define HCLGE_MAX_PF_NUM 8
/* Sentinel vport id meaning "no valid vport". */
19 #define HCLGE_INVALID_VPORT 0xffff
/* PF configuration is fetched from firmware in 32-byte blocks;
 * DESC_NUM is the number of command descriptors needed per block.
 */
21 #define HCLGE_PF_CFG_BLOCK_SIZE 32
22 #define HCLGE_PF_CFG_DESC_NUM \
23 (HCLGE_PF_CFG_BLOCK_SIZE / HCLGE_CFG_RD_LEN_BYTES)
/* BAR offsets of the interrupt vector registers. */
25 #define HCLGE_VECTOR_REG_BASE 0x20000
26 #define HCLGE_MISC_VECTOR_REG_BASE 0x20400
28 #define HCLGE_VECTOR_REG_OFFSET 0x4
29 #define HCLGE_VECTOR_VF_OFFSET 0x100000
/* Command queue (CMDQ) register offsets: TX/RX descriptor ring base
 * address (low/high words), ring depth, head/tail pointers and the
 * interrupt source/status/enable/generate registers.
 */
31 #define HCLGE_CMDQ_TX_ADDR_L_REG 0x27000
32 #define HCLGE_CMDQ_TX_ADDR_H_REG 0x27004
33 #define HCLGE_CMDQ_TX_DEPTH_REG 0x27008
34 #define HCLGE_CMDQ_TX_TAIL_REG 0x27010
35 #define HCLGE_CMDQ_TX_HEAD_REG 0x27014
36 #define HCLGE_CMDQ_RX_ADDR_L_REG 0x27018
37 #define HCLGE_CMDQ_RX_ADDR_H_REG 0x2701C
38 #define HCLGE_CMDQ_RX_DEPTH_REG 0x27020
39 #define HCLGE_CMDQ_RX_TAIL_REG 0x27024
40 #define HCLGE_CMDQ_RX_HEAD_REG 0x27028
41 #define HCLGE_CMDQ_INTR_SRC_REG 0x27100
42 #define HCLGE_CMDQ_INTR_STS_REG 0x27104
43 #define HCLGE_CMDQ_INTR_EN_REG 0x27108
44 #define HCLGE_CMDQ_INTR_GEN_REG 0x2710C
46 /* bar registers for common func */
/* NOTE(review): "OTER" looks like a historical misspelling of "OTHER";
 * the name is kept as-is to avoid churn for users of this header.
 */
47 #define HCLGE_VECTOR0_OTER_EN_REG 0x20600
48 #define HCLGE_RAS_OTHER_STS_REG 0x20B00
49 #define HCLGE_FUNC_RESET_STS_REG 0x20C00
50 #define HCLGE_GRO_EN_REG 0x28000
52 /* bar registers for rcb */
/* RX descriptor ring: base address (low/high), BD count/length,
 * merge enable, head/tail pointers, FBD bookkeeping and error regs.
 */
53 #define HCLGE_RING_RX_ADDR_L_REG 0x80000
54 #define HCLGE_RING_RX_ADDR_H_REG 0x80004
55 #define HCLGE_RING_RX_BD_NUM_REG 0x80008
56 #define HCLGE_RING_RX_BD_LENGTH_REG 0x8000C
57 #define HCLGE_RING_RX_MERGE_EN_REG 0x80014
58 #define HCLGE_RING_RX_TAIL_REG 0x80018
59 #define HCLGE_RING_RX_HEAD_REG 0x8001C
60 #define HCLGE_RING_RX_FBD_NUM_REG 0x80020
61 #define HCLGE_RING_RX_OFFSET_REG 0x80024
62 #define HCLGE_RING_RX_FBD_OFFSET_REG 0x80028
63 #define HCLGE_RING_RX_STASH_REG 0x80030
64 #define HCLGE_RING_RX_BD_ERR_REG 0x80034
/* TX descriptor ring: same layout idea as RX, plus priority/TC and
 * EBD bookkeeping registers.
 */
65 #define HCLGE_RING_TX_ADDR_L_REG 0x80040
66 #define HCLGE_RING_TX_ADDR_H_REG 0x80044
67 #define HCLGE_RING_TX_BD_NUM_REG 0x80048
68 #define HCLGE_RING_TX_PRIORITY_REG 0x8004C
69 #define HCLGE_RING_TX_TC_REG 0x80050
70 #define HCLGE_RING_TX_MERGE_EN_REG 0x80054
71 #define HCLGE_RING_TX_TAIL_REG 0x80058
72 #define HCLGE_RING_TX_HEAD_REG 0x8005C
73 #define HCLGE_RING_TX_FBD_NUM_REG 0x80060
74 #define HCLGE_RING_TX_OFFSET_REG 0x80064
75 #define HCLGE_RING_TX_EBD_NUM_REG 0x80068
76 #define HCLGE_RING_TX_EBD_OFFSET_REG 0x80070
77 #define HCLGE_RING_TX_BD_ERR_REG 0x80074
78 #define HCLGE_RING_EN_REG 0x80090
80 /* bar registers for tqp interrupt */
/* Per-TQP interrupt control, three gap-limit (GL) levels and the
 * rate-limit (RL) register.
 */
81 #define HCLGE_TQP_INTR_CTRL_REG 0x20000
82 #define HCLGE_TQP_INTR_GL0_REG 0x20100
83 #define HCLGE_TQP_INTR_GL1_REG 0x20200
84 #define HCLGE_TQP_INTR_GL2_REG 0x20300
85 #define HCLGE_TQP_INTR_RL_REG 0x20900
/* RSS: 512-entry indirection table and a 40-byte hash key. */
87 #define HCLGE_RSS_IND_TBL_SIZE 512
88 #define HCLGE_RSS_SET_BITMAP_MSK GENMASK(15, 0)
89 #define HCLGE_RSS_KEY_SIZE 40
/* Hash algorithm selectors programmed into hardware. */
90 #define HCLGE_RSS_HASH_ALGO_TOEPLITZ 0
91 #define HCLGE_RSS_HASH_ALGO_SIMPLE 1
92 #define HCLGE_RSS_HASH_ALGO_SYMMETRIC 2
93 #define HCLGE_RSS_HASH_ALGO_MASK GENMASK(3, 0)
/* Number of config-table commands needed to cover the whole
 * indirection table.
 */
94 #define HCLGE_RSS_CFG_TBL_NUM \
95 (HCLGE_RSS_IND_TBL_SIZE / HCLGE_RSS_CFG_TBL_SIZE)
/* Per-protocol input-tuple enable masks and individual tuple bits. */
97 #define HCLGE_RSS_INPUT_TUPLE_OTHER GENMASK(3, 0)
98 #define HCLGE_RSS_INPUT_TUPLE_SCTP GENMASK(4, 0)
99 #define HCLGE_D_PORT_BIT BIT(0)
100 #define HCLGE_S_PORT_BIT BIT(1)
101 #define HCLGE_D_IP_BIT BIT(2)
102 #define HCLGE_S_IP_BIT BIT(3)
103 #define HCLGE_V_TAG_BIT BIT(4)
/* Selectable per-TC RSS queue counts (powers of two, 1..128). */
105 #define HCLGE_RSS_TC_SIZE_0 1
106 #define HCLGE_RSS_TC_SIZE_1 2
107 #define HCLGE_RSS_TC_SIZE_2 4
108 #define HCLGE_RSS_TC_SIZE_3 8
109 #define HCLGE_RSS_TC_SIZE_4 16
110 #define HCLGE_RSS_TC_SIZE_5 32
111 #define HCLGE_RSS_TC_SIZE_6 64
112 #define HCLGE_RSS_TC_SIZE_7 128
/* Unicast MAC-VLAN (UMV) table: 3072 entries split evenly across the
 * maximum number of PFs by default.
 */
114 #define HCLGE_UMV_TBL_SIZE 3072
115 #define HCLGE_DEFAULT_UMV_SPACE_PER_PF \
116 (HCLGE_UMV_TBL_SIZE / HCLGE_MAX_PF_NUM)
/* How many times to poll for TQP reset completion before giving up. */
118 #define HCLGE_TQP_RESET_TRY_TIMES 10
/* PHY page numbers; presumably a paged-register PHY where page 0
 * holds both MDIX and copper registers — TODO confirm against the
 * PHY datasheet.
 */
120 #define HCLGE_PHY_PAGE_MDIX 0
121 #define HCLGE_PHY_PAGE_COPPER 0
123 /* Page Selection Reg. */
124 #define HCLGE_PHY_PAGE_REG 22
126 /* Copper Specific Control Register */
127 #define HCLGE_PHY_CSC_REG 16
129 /* Copper Specific Status Register */
130 #define HCLGE_PHY_CSS_REG 17
/* MDI/MDIX crossover control field (bits 6:5) and status bits. */
132 #define HCLGE_PHY_MDIX_CTRL_S 5
133 #define HCLGE_PHY_MDIX_CTRL_M GENMASK(6, 5)
135 #define HCLGE_PHY_MDIX_STATUS_B 6
136 #define HCLGE_PHY_SPEED_DUP_RESOLVE_B 11
138 /* Factor used to calculate offset and bitmap of VF num */
139 #define HCLGE_VF_NUM_PER_CMD 64
140 #define HCLGE_VF_NUM_PER_BYTE 8
142 enum HLCGE_PORT_TYPE
{
/* Port-id field layout: PF id in bits 2:0, VF id in bits 10:3,
 * port-type flag in bit 11; network port id uses bits 3:0.
 */
147 #define HCLGE_PF_ID_S 0
148 #define HCLGE_PF_ID_M GENMASK(2, 0)
149 #define HCLGE_VF_ID_S 3
150 #define HCLGE_VF_ID_M GENMASK(10, 3)
151 #define HCLGE_PORT_TYPE_B 11
152 #define HCLGE_NETWORK_PORT_ID_S 0
153 #define HCLGE_NETWORK_PORT_ID_M GENMASK(3, 0)
155 /* Reset related Registers */
156 #define HCLGE_PF_OTHER_INT_REG 0x20600
157 #define HCLGE_MISC_RESET_STS_REG 0x20700
158 #define HCLGE_MISC_VECTOR_INT_STS 0x20800
/* Global reset register and the bit positions of the individual
 * reset sources within it.
 */
159 #define HCLGE_GLOBAL_RESET_REG 0x20A00
160 #define HCLGE_GLOBAL_RESET_BIT 0
161 #define HCLGE_CORE_RESET_BIT 1
162 #define HCLGE_IMP_RESET_BIT 2
163 #define HCLGE_FUN_RST_ING 0x20C00
164 #define HCLGE_FUN_RST_ING_B 0
166 /* Vector0 register bits define */
167 #define HCLGE_VECTOR0_GLOBALRESET_INT_B 5
168 #define HCLGE_VECTOR0_CORERESET_INT_B 6
169 #define HCLGE_VECTOR0_IMPRESET_INT_B 7
171 /* Vector0 interrupt CMDQ event source register(RW) */
/* Same offset (0x27100) as HCLGE_CMDQ_INTR_SRC_REG above. */
172 #define HCLGE_VECTOR0_CMDQ_SRC_REG 0x27100
173 /* CMDQ register bits for RX event(=MBX event) */
174 #define HCLGE_VECTOR0_RX_CMDQ_INT_B 1
176 #define HCLGE_VECTOR0_IMP_RESET_INT_B 1
/* Default frame size: Ethernet header + FCS + two VLAN tags + MTU;
 * hardware min/max frame lengths in bytes.
 */
178 #define HCLGE_MAC_DEFAULT_FRAME \
179 (ETH_HLEN + ETH_FCS_LEN + 2 * VLAN_HLEN + ETH_DATA_LEN)
180 #define HCLGE_MAC_MIN_FRAME 64
181 #define HCLGE_MAC_MAX_FRAME 9728
/* Link-speed capability bitmap as reported by firmware. */
183 #define HCLGE_SUPPORT_1G_BIT BIT(0)
184 #define HCLGE_SUPPORT_10G_BIT BIT(1)
185 #define HCLGE_SUPPORT_25G_BIT BIT(2)
186 #define HCLGE_SUPPORT_50G_BIT BIT(3)
187 #define HCLGE_SUPPORT_100G_BIT BIT(4)
189 enum HCLGE_DEV_STATE
{
190 HCLGE_STATE_REINITING
,
192 HCLGE_STATE_DISABLED
,
193 HCLGE_STATE_REMOVING
,
194 HCLGE_STATE_SERVICE_INITED
,
195 HCLGE_STATE_SERVICE_SCHED
,
196 HCLGE_STATE_RST_SERVICE_SCHED
,
197 HCLGE_STATE_RST_HANDLING
,
198 HCLGE_STATE_MBX_SERVICE_SCHED
,
199 HCLGE_STATE_MBX_HANDLING
,
200 HCLGE_STATE_STATISTICS_UPDATING
,
201 HCLGE_STATE_CMD_DISABLE
,
205 enum hclge_evt_cause
{
206 HCLGE_VECTOR0_EVENT_RST
,
207 HCLGE_VECTOR0_EVENT_MBX
,
208 HCLGE_VECTOR0_EVENT_ERR
,
209 HCLGE_VECTOR0_EVENT_OTHER
,
/* NOTE(review): "ENBALE" is a misspelling of "ENABLE"; name kept
 * as-is for compatibility with existing users of this header.
 */
212 #define HCLGE_MPF_ENBALE 1
214 enum HCLGE_MAC_SPEED
{
215 HCLGE_MAC_SPEED_UNKNOWN
= 0, /* unknown */
216 HCLGE_MAC_SPEED_10M
= 10, /* 10 Mbps */
217 HCLGE_MAC_SPEED_100M
= 100, /* 100 Mbps */
218 HCLGE_MAC_SPEED_1G
= 1000, /* 1000 Mbps = 1 Gbps */
219 HCLGE_MAC_SPEED_10G
= 10000, /* 10000 Mbps = 10 Gbps */
220 HCLGE_MAC_SPEED_25G
= 25000, /* 25000 Mbps = 25 Gbps */
221 HCLGE_MAC_SPEED_40G
= 40000, /* 40000 Mbps = 40 Gbps */
222 HCLGE_MAC_SPEED_50G
= 50000, /* 50000 Mbps = 50 Gbps */
223 HCLGE_MAC_SPEED_100G
= 100000 /* 100000 Mbps = 100 Gbps */
226 enum HCLGE_MAC_DUPLEX
{
235 u8 mac_addr
[ETH_ALEN
];
239 int link
; /* store the link status of mac & phy (if phy exit)*/
240 struct phy_device
*phydev
;
241 struct mii_bus
*mdio_bus
;
242 phy_interface_t phy_if
;
243 __ETHTOOL_DECLARE_LINK_MODE_MASK(supported
);
244 __ETHTOOL_DECLARE_LINK_MODE_MASK(advertising
);
248 void __iomem
*io_base
;
249 struct hclge_mac mac
;
251 struct hclge_cmq cmq
;
255 struct hlcge_tqp_stats
{
256 /* query_tqp_tx_queue_statistics ,opcode id: 0x0B03 */
257 u64 rcb_tx_ring_pktnum_rcd
; /* 32bit */
258 /* query_tqp_rx_queue_statistics ,opcode id: 0x0B13 */
259 u64 rcb_rx_ring_pktnum_rcd
; /* 32bit */
263 /* copy of device pointer from pci_dev,
264 * used when perform DMA mapping
267 struct hnae3_queue q
;
268 struct hlcge_tqp_stats tqp_stats
;
269 u16 index
; /* Global index in a NIC controller */
/* Traffic-manager priority groups: 4 PGs; scheduling mode is either
 * strict priority (SP) or deficit weighted round robin (DWRR).
 */
283 #define HCLGE_PG_NUM 4
284 #define HCLGE_SCH_MODE_SP 0
285 #define HCLGE_SCH_MODE_DWRR 1
286 struct hclge_pg_info
{
288 u8 pg_sch_mode
; /* 0: sp; 1: dwrr */
291 u8 tc_dwrr
[HNAE3_MAX_TC
];
294 struct hclge_tc_info
{
296 u8 tc_sch_mode
; /* 0: sp; 1: dwrr */
309 u8 mac_addr
[ETH_ALEN
];
316 struct hclge_tm_info
{
318 u8 num_pg
; /* It must be 1 if vNET-Base schd */
319 u8 pg_dwrr
[HCLGE_PG_NUM
];
320 u8 prio_tc
[HNAE3_MAX_USER_PRIO
];
321 struct hclge_pg_info pg_info
[HCLGE_PG_NUM
];
322 struct hclge_tc_info tc_info
[HNAE3_MAX_TC
];
323 enum hclge_fc_mode fc_mode
;
324 u8 hw_pfc_map
; /* Allow for packet drop or not on this TC */
327 struct hclge_comm_stats_str
{
328 char desc
[ETH_GSTRING_LEN
];
329 unsigned long offset
;
332 /* mac stats ,opcode id: 0x0032 */
333 struct hclge_mac_stats
{
334 u64 mac_tx_mac_pause_num
;
335 u64 mac_rx_mac_pause_num
;
336 u64 mac_tx_pfc_pri0_pkt_num
;
337 u64 mac_tx_pfc_pri1_pkt_num
;
338 u64 mac_tx_pfc_pri2_pkt_num
;
339 u64 mac_tx_pfc_pri3_pkt_num
;
340 u64 mac_tx_pfc_pri4_pkt_num
;
341 u64 mac_tx_pfc_pri5_pkt_num
;
342 u64 mac_tx_pfc_pri6_pkt_num
;
343 u64 mac_tx_pfc_pri7_pkt_num
;
344 u64 mac_rx_pfc_pri0_pkt_num
;
345 u64 mac_rx_pfc_pri1_pkt_num
;
346 u64 mac_rx_pfc_pri2_pkt_num
;
347 u64 mac_rx_pfc_pri3_pkt_num
;
348 u64 mac_rx_pfc_pri4_pkt_num
;
349 u64 mac_rx_pfc_pri5_pkt_num
;
350 u64 mac_rx_pfc_pri6_pkt_num
;
351 u64 mac_rx_pfc_pri7_pkt_num
;
352 u64 mac_tx_total_pkt_num
;
353 u64 mac_tx_total_oct_num
;
354 u64 mac_tx_good_pkt_num
;
355 u64 mac_tx_bad_pkt_num
;
356 u64 mac_tx_good_oct_num
;
357 u64 mac_tx_bad_oct_num
;
358 u64 mac_tx_uni_pkt_num
;
359 u64 mac_tx_multi_pkt_num
;
360 u64 mac_tx_broad_pkt_num
;
361 u64 mac_tx_undersize_pkt_num
;
362 u64 mac_tx_oversize_pkt_num
;
363 u64 mac_tx_64_oct_pkt_num
;
364 u64 mac_tx_65_127_oct_pkt_num
;
365 u64 mac_tx_128_255_oct_pkt_num
;
366 u64 mac_tx_256_511_oct_pkt_num
;
367 u64 mac_tx_512_1023_oct_pkt_num
;
368 u64 mac_tx_1024_1518_oct_pkt_num
;
369 u64 mac_tx_1519_2047_oct_pkt_num
;
370 u64 mac_tx_2048_4095_oct_pkt_num
;
371 u64 mac_tx_4096_8191_oct_pkt_num
;
373 u64 mac_tx_8192_9216_oct_pkt_num
;
374 u64 mac_tx_9217_12287_oct_pkt_num
;
375 u64 mac_tx_12288_16383_oct_pkt_num
;
376 u64 mac_tx_1519_max_good_oct_pkt_num
;
377 u64 mac_tx_1519_max_bad_oct_pkt_num
;
379 u64 mac_rx_total_pkt_num
;
380 u64 mac_rx_total_oct_num
;
381 u64 mac_rx_good_pkt_num
;
382 u64 mac_rx_bad_pkt_num
;
383 u64 mac_rx_good_oct_num
;
384 u64 mac_rx_bad_oct_num
;
385 u64 mac_rx_uni_pkt_num
;
386 u64 mac_rx_multi_pkt_num
;
387 u64 mac_rx_broad_pkt_num
;
388 u64 mac_rx_undersize_pkt_num
;
389 u64 mac_rx_oversize_pkt_num
;
390 u64 mac_rx_64_oct_pkt_num
;
391 u64 mac_rx_65_127_oct_pkt_num
;
392 u64 mac_rx_128_255_oct_pkt_num
;
393 u64 mac_rx_256_511_oct_pkt_num
;
394 u64 mac_rx_512_1023_oct_pkt_num
;
395 u64 mac_rx_1024_1518_oct_pkt_num
;
396 u64 mac_rx_1519_2047_oct_pkt_num
;
397 u64 mac_rx_2048_4095_oct_pkt_num
;
398 u64 mac_rx_4096_8191_oct_pkt_num
;
400 u64 mac_rx_8192_9216_oct_pkt_num
;
401 u64 mac_rx_9217_12287_oct_pkt_num
;
402 u64 mac_rx_12288_16383_oct_pkt_num
;
403 u64 mac_rx_1519_max_good_oct_pkt_num
;
404 u64 mac_rx_1519_max_bad_oct_pkt_num
;
406 u64 mac_tx_fragment_pkt_num
;
407 u64 mac_tx_undermin_pkt_num
;
408 u64 mac_tx_jabber_pkt_num
;
409 u64 mac_tx_err_all_pkt_num
;
410 u64 mac_tx_from_app_good_pkt_num
;
411 u64 mac_tx_from_app_bad_pkt_num
;
412 u64 mac_rx_fragment_pkt_num
;
413 u64 mac_rx_undermin_pkt_num
;
414 u64 mac_rx_jabber_pkt_num
;
415 u64 mac_rx_fcs_err_pkt_num
;
416 u64 mac_rx_send_app_good_pkt_num
;
417 u64 mac_rx_send_app_bad_pkt_num
;
/* HW-stats refresh interval, 60 * 5 = 300 ticks; presumably service
 * timer ticks (i.e. 5 minutes at one tick/second) — TODO confirm
 * against the service-task implementation.
 */
420 #define HCLGE_STATS_TIMER_INTERVAL (60 * 5)
421 struct hclge_hw_stats
{
422 struct hclge_mac_stats mac_stats
;
426 struct hclge_vlan_type_cfg
{
427 u16 rx_ot_fst_vlan_type
;
428 u16 rx_ot_sec_vlan_type
;
429 u16 rx_in_fst_vlan_type
;
430 u16 rx_in_sec_vlan_type
;
436 HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1
,
437 HCLGE_FD_MODE_DEPTH_1K_WIDTH_400B_STAGE_2
,
438 HCLGE_FD_MODE_DEPTH_4K_WIDTH_200B_STAGE_1
,
439 HCLGE_FD_MODE_DEPTH_2K_WIDTH_200B_STAGE_2
,
442 enum HCLGE_FD_KEY_TYPE
{
443 HCLGE_FD_KEY_BASE_ON_PTYPE
,
444 HCLGE_FD_KEY_BASE_ON_TUPLE
,
447 enum HCLGE_FD_STAGE
{
452 /* OUTER_XXX indicates tuples in tunnel header of tunnel packet
453 * INNER_XXX indicate tuples in tunneled header of tunnel packet or
454 * tuples of non-tunnel packet
456 enum HCLGE_FD_TUPLE
{
490 enum HCLGE_FD_META_DATA
{
507 static const struct key_info meta_data_key_info
[] = {
508 { PACKET_TYPE_ID
, 6},
518 static const struct key_info tuple_key_info
[] = {
519 { OUTER_DST_MAC
, 48},
520 { OUTER_SRC_MAC
, 48},
521 { OUTER_VLAN_TAG_FST
, 16},
522 { OUTER_VLAN_TAG_SEC
, 16},
523 { OUTER_ETH_TYPE
, 16},
526 { OUTER_IP_PROTO
, 8},
530 { OUTER_SRC_PORT
, 16},
531 { OUTER_DST_PORT
, 16},
533 { OUTER_TUN_VNI
, 24},
534 { OUTER_TUN_FLOW_ID
, 8},
535 { INNER_DST_MAC
, 48},
536 { INNER_SRC_MAC
, 48},
537 { INNER_VLAN_TAG_FST
, 16},
538 { INNER_VLAN_TAG_SEC
, 16},
539 { INNER_ETH_TYPE
, 16},
542 { INNER_IP_PROTO
, 8},
546 { INNER_SRC_PORT
, 16},
547 { INNER_DST_PORT
, 16},
/* Flow-director TCAM key: 400 bits; DWORDS rounds the byte length
 * (400 / 8 = 50) up to whole 4-byte words (13), giving 52 bytes.
 */
551 #define MAX_KEY_LENGTH 400
552 #define MAX_KEY_DWORDS DIV_ROUND_UP(MAX_KEY_LENGTH / 8, 4)
553 #define MAX_KEY_BYTES (MAX_KEY_DWORDS * 4)
554 #define MAX_META_DATA_LENGTH 32
556 enum HCLGE_FD_PACKET_TYPE
{
561 enum HCLGE_FD_ACTION
{
562 HCLGE_FD_ACTION_ACCEPT_PACKET
,
563 HCLGE_FD_ACTION_DROP_PACKET
,
566 struct hclge_fd_key_cfg
{
568 u8 inner_sipv6_word_en
;
569 u8 inner_dipv6_word_en
;
570 u8 outer_sipv6_word_en
;
571 u8 outer_dipv6_word_en
;
573 u32 meta_data_active
;
576 struct hclge_fd_cfg
{
581 u32 rule_num
[2]; /* rule entry number */
582 u16 cnt_num
[2]; /* rule hit counter number */
583 struct hclge_fd_key_cfg key_cfg
[2];
586 struct hclge_fd_rule_tuples
{
599 struct hclge_fd_rule
{
600 struct hlist_node rule_node
;
601 struct hclge_fd_rule_tuples tuples
;
602 struct hclge_fd_rule_tuples tuples_mask
;
611 struct hclge_fd_ad_data
{
614 u8 forward_to_direct_queue
;
619 u8 write_rule_id_to_bd
;
624 /* For each bit of TCAM entry, it uses a pair of 'x' and
625 * 'y' to indicate which value to match, like below:
626 * ----------------------------------
627 * | bit x | bit y | search value |
628 * ----------------------------------
629 * | 0 | 0 | always hit |
630 * ----------------------------------
631 * | 1 | 0 | match '0' |
632 * ----------------------------------
633 * | 0 | 1 | match '1' |
634 * ----------------------------------
635 * | 1 | 1 | invalid |
636 * ----------------------------------
637 * Then for input key(k) and mask(v), we can calculate the value by
/* For each TCAM entry bit the hardware stores a pair of bits 'x'
 * and 'y' that select how the bit is matched (see table above):
 *   x=0 y=0: always hit      x=1 y=0: match '0'
 *   x=0 y=1: match '1'       x=1 y=1: invalid
 * For a key bit k with mask bit v (1 = compare, 0 = ignore) the pair
 * to program is x = ~k & v and y = (k ^ ~v) & k (== k & v), computed
 * bitwise over whole words.  calc_y uses a do/while(0) block so it
 * expands safely as a single statement and evaluates k and v once.
 * __typeof__ is the GNU extension spelling, valid in strict ISO mode.
 */
#define calc_x(x, k, v) ((x) = (~(k) & (v)))
#define calc_y(y, k, v) \
	do { \
		const __typeof__(k) _k_ = (k); \
		const __typeof__(v) _v_ = (v); \
		(y) = (_k_ ^ ~_v_) & (_k_); \
	} while (0)
/* Maximum number of virtual ports (PF + VFs) per device. */
650 #define HCLGE_VPORT_NUM 256
652 struct pci_dev
*pdev
;
653 struct hnae3_ae_dev
*ae_dev
;
655 struct hclge_misc_vector misc_vector
;
656 struct hclge_hw_stats hw_stats
;
658 unsigned long flr_state
;
659 unsigned long last_reset_time
;
661 enum hnae3_reset_type reset_type
;
662 enum hnae3_reset_type reset_level
;
663 unsigned long default_reset_request
;
664 unsigned long reset_request
; /* reset has been requested */
665 unsigned long reset_pending
; /* client rst is pending to be served */
666 unsigned long reset_count
; /* the number of reset has been done */
669 u16 num_vmdq_vport
; /* Num vmdq vport this PF has set up */
670 u16 num_tqps
; /* Num task queue pairs of this PF */
671 u16 num_req_vfs
; /* Num VFs requested for this PF */
673 u16 base_tqp_pid
; /* Base task tqp physical id of this PF */
674 u16 alloc_rss_size
; /* Allocated RSS task queue */
675 u16 rss_size_max
; /* HW defined max RSS task queue */
677 u16 fdir_pf_filter_count
; /* Num of guaranteed filters for this PF */
678 u16 num_alloc_vport
; /* Num vports this driver supports */
684 enum hclge_fc_mode fc_mode_last_time
;
685 u8 support_sfp_query
;
/* Transmit scheduling mode: TC-based vs vNET-based. */
687 #define HCLGE_FLAG_TC_BASE_SCH_MODE 1
688 #define HCLGE_FLAG_VNET_BASE_SCH_MODE 2
695 struct hclge_tm_info tm_info
;
700 u16 roce_base_msix_offset
;
704 u16 num_roce_msi
; /* Num of roce vectors for this PF */
705 int roce_base_vector
;
707 u16 pending_udp_bitmap
;
712 u16 adminq_work_limit
; /* Num of admin receive queue desc to process */
713 unsigned long service_timer_period
;
714 unsigned long service_timer_previous
;
715 struct timer_list service_timer
;
716 struct timer_list reset_timer
;
717 struct work_struct service_task
;
718 struct work_struct rst_service_task
;
719 struct work_struct mbx_service_task
;
722 int num_alloc_vfs
; /* Actual number of VFs allocated */
724 struct hclge_tqp
*htqp
;
725 struct hclge_vport
*vport
;
727 struct dentry
*hclge_dbgfs
;
729 struct hnae3_client
*nic_client
;
730 struct hnae3_client
*roce_client
;
/* Per-device capability/state flag bits. */
732 #define HCLGE_FLAG_MAIN BIT(0)
733 #define HCLGE_FLAG_DCB_CAPABLE BIT(1)
734 #define HCLGE_FLAG_DCB_ENABLE BIT(2)
735 #define HCLGE_FLAG_MQPRIO_ENABLE BIT(3)
738 u32 pkt_buf_size
; /* Total pf buf size for tx/rx */
739 u32 tx_buf_size
; /* Tx buffer size for each TC */
740 u32 dv_buf_size
; /* Dv buffer size for each TC */
742 u32 mps
; /* Max packet size */
743 /* vport_lock protect resource shared by vports */
744 struct mutex vport_lock
;
746 struct hclge_vlan_type_cfg vlan_type_cfg
;
748 unsigned long vlan_table
[VLAN_N_VID
][BITS_TO_LONGS(HCLGE_VPORT_NUM
)];
750 struct hclge_fd_cfg fd_cfg
;
751 struct hlist_head fd_rule_list
;
752 u16 hclge_fd_rule_num
;
755 /* max available unicast mac vlan space */
757 /* private unicast mac vlan space, it's same for PF and its VFs */
759 /* unicast mac vlan space shared by PF and its VFs */
761 struct mutex umv_mutex
; /* protect share_umv_size */
764 /* VPort level vlan tag configuration for TX direction */
765 struct hclge_tx_vtag_cfg
{
766 bool accept_tag1
; /* Whether accept tag1 packet from host */
767 bool accept_untag1
; /* Whether accept untag1 packet from host */
770 bool insert_tag1_en
; /* Whether insert inner vlan tag */
771 bool insert_tag2_en
; /* Whether insert outer vlan tag */
772 u16 default_tag1
; /* The default inner vlan tag to insert */
773 u16 default_tag2
; /* The default outer vlan tag to insert */
776 /* VPort level vlan tag configuration for RX direction */
777 struct hclge_rx_vtag_cfg
{
778 bool strip_tag1_en
; /* Whether strip inner vlan tag */
779 bool strip_tag2_en
; /* Whether strip outer vlan tag */
780 bool vlan1_vlan_prionly
;/* Inner VLAN Tag up to descriptor Enable */
781 bool vlan2_vlan_prionly
;/* Outer VLAN Tag up to descriptor Enable */
784 struct hclge_rss_tuple_cfg
{
795 enum HCLGE_VPORT_STATE
{
796 HCLGE_VPORT_STATE_ALIVE
,
797 HCLGE_VPORT_STATE_MAX
801 u16 alloc_tqps
; /* Allocated Tx/Rx queues */
803 u8 rss_hash_key
[HCLGE_RSS_KEY_SIZE
]; /* User configured hash keys */
804 /* User configured lookup table entries */
805 u8 rss_indirection_tbl
[HCLGE_RSS_IND_TBL_SIZE
];
806 int rss_algo
; /* User configured hash algorithm */
807 /* User configured rss tuple sets */
808 struct hclge_rss_tuple_cfg rss_tuple_sets
;
813 u16 bw_limit
; /* VSI BW Limit (0 = disabled) */
816 struct hclge_tx_vtag_cfg txvlan_cfg
;
817 struct hclge_rx_vtag_cfg rxvlan_cfg
;
822 struct hclge_dev
*back
; /* Back reference to associated dev */
823 struct hnae3_handle nic
;
824 struct hnae3_handle roce
;
827 unsigned long last_active_jiffies
;
828 u32 mps
; /* Max packet size */
831 void hclge_promisc_param_init(struct hclge_promisc_param
*param
, bool en_uc
,
832 bool en_mc
, bool en_bc
, int vport_id
);
834 int hclge_add_uc_addr_common(struct hclge_vport
*vport
,
835 const unsigned char *addr
);
836 int hclge_rm_uc_addr_common(struct hclge_vport
*vport
,
837 const unsigned char *addr
);
838 int hclge_add_mc_addr_common(struct hclge_vport
*vport
,
839 const unsigned char *addr
);
840 int hclge_rm_mc_addr_common(struct hclge_vport
*vport
,
841 const unsigned char *addr
);
843 struct hclge_vport
*hclge_get_vport(struct hnae3_handle
*handle
);
844 int hclge_bind_ring_with_vector(struct hclge_vport
*vport
,
845 int vector_id
, bool en
,
846 struct hnae3_ring_chain_node
*ring_chain
);
848 static inline int hclge_get_queue_id(struct hnae3_queue
*queue
)
850 struct hclge_tqp
*tqp
= container_of(queue
, struct hclge_tqp
, q
);
855 static inline bool hclge_is_reset_pending(struct hclge_dev
*hdev
)
857 return !!hdev
->reset_pending
;
860 int hclge_inform_reset_assert_to_vf(struct hclge_vport
*vport
);
861 int hclge_cfg_mac_speed_dup(struct hclge_dev
*hdev
, int speed
, u8 duplex
);
862 int hclge_set_vlan_filter(struct hnae3_handle
*handle
, __be16 proto
,
863 u16 vlan_id
, bool is_kill
);
864 int hclge_en_hw_strip_rxvtag(struct hnae3_handle
*handle
, bool enable
);
866 int hclge_buffer_alloc(struct hclge_dev
*hdev
);
867 int hclge_rss_init_hw(struct hclge_dev
*hdev
);
868 void hclge_rss_indir_init_cfg(struct hclge_dev
*hdev
);
870 int hclge_inform_reset_assert_to_vf(struct hclge_vport
*vport
);
871 void hclge_mbx_handler(struct hclge_dev
*hdev
);
872 int hclge_reset_tqp(struct hnae3_handle
*handle
, u16 queue_id
);
873 void hclge_reset_vf_queue(struct hclge_vport
*vport
, u16 queue_id
);
874 int hclge_cfg_flowctrl(struct hclge_dev
*hdev
);
875 int hclge_func_reset_cmd(struct hclge_dev
*hdev
, int func_id
);
876 int hclge_vport_start(struct hclge_vport
*vport
);
877 void hclge_vport_stop(struct hclge_vport
*vport
);
878 int hclge_set_vport_mtu(struct hclge_vport
*vport
, int new_mtu
);
879 int hclge_dbg_run_cmd(struct hnae3_handle
*handle
, char *cmd_buf
);
880 u16
hclge_covert_handle_qid_global(struct hnae3_handle
*handle
, u16 queue_id
);