/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2018 Intel Corporation
 */

#include <rte_string_fns.h>
#include <rte_ethdev_pci.h>

#include "base/ice_sched.h"
#include "base/ice_flow.h"
#include "base/ice_dcb.h"
#include "ice_ethdev.h"

#define ICE_MAX_QP_NUM "max_queue_pair_num"
#define ICE_DFLT_OUTER_TAG_TYPE ICE_AQ_VSI_OUTER_TAG_VLAN_9100
#define ICE_DFLT_PKG_FILE "/lib/firmware/intel/ice/ddp/ice.pkg"

int ice_logtype_driver;
static int ice_dev_configure(struct rte_eth_dev *dev);
static int ice_dev_start(struct rte_eth_dev *dev);
static void ice_dev_stop(struct rte_eth_dev *dev);
static void ice_dev_close(struct rte_eth_dev *dev);
static int ice_dev_reset(struct rte_eth_dev *dev);
static void ice_dev_info_get(struct rte_eth_dev *dev,
			     struct rte_eth_dev_info *dev_info);
static int ice_link_update(struct rte_eth_dev *dev,
			   int wait_to_complete);
static int ice_mtu_set(struct rte_eth_dev *dev, uint16_t mtu);
static int ice_vlan_offload_set(struct rte_eth_dev *dev, int mask);
static int ice_vlan_tpid_set(struct rte_eth_dev *dev,
			     enum rte_vlan_type vlan_type,
			     uint16_t tpid);
static int ice_rss_reta_update(struct rte_eth_dev *dev,
			       struct rte_eth_rss_reta_entry64 *reta_conf,
			       uint16_t reta_size);
static int ice_rss_reta_query(struct rte_eth_dev *dev,
			      struct rte_eth_rss_reta_entry64 *reta_conf,
			      uint16_t reta_size);
static int ice_rss_hash_update(struct rte_eth_dev *dev,
			       struct rte_eth_rss_conf *rss_conf);
static int ice_rss_hash_conf_get(struct rte_eth_dev *dev,
				 struct rte_eth_rss_conf *rss_conf);
static void ice_promisc_enable(struct rte_eth_dev *dev);
static void ice_promisc_disable(struct rte_eth_dev *dev);
static void ice_allmulti_enable(struct rte_eth_dev *dev);
static void ice_allmulti_disable(struct rte_eth_dev *dev);
static int ice_vlan_filter_set(struct rte_eth_dev *dev,
			       uint16_t vlan_id, int on);
static int ice_macaddr_set(struct rte_eth_dev *dev,
			   struct ether_addr *mac_addr);
static int ice_macaddr_add(struct rte_eth_dev *dev,
			   struct ether_addr *mac_addr,
			   __rte_unused uint32_t index,
			   __rte_unused uint32_t pool);
static void ice_macaddr_remove(struct rte_eth_dev *dev, uint32_t index);
static int ice_rx_queue_intr_enable(struct rte_eth_dev *dev,
				    uint16_t queue_id);
static int ice_rx_queue_intr_disable(struct rte_eth_dev *dev,
				     uint16_t queue_id);
static int ice_fw_version_get(struct rte_eth_dev *dev, char *fw_version,
			      size_t fw_size);
static int ice_vlan_pvid_set(struct rte_eth_dev *dev,
			     uint16_t pvid, int on);
static int ice_get_eeprom_length(struct rte_eth_dev *dev);
static int ice_get_eeprom(struct rte_eth_dev *dev,
			  struct rte_dev_eeprom_info *eeprom);
static int ice_stats_get(struct rte_eth_dev *dev,
			 struct rte_eth_stats *stats);
static void ice_stats_reset(struct rte_eth_dev *dev);
static int ice_xstats_get(struct rte_eth_dev *dev,
			  struct rte_eth_xstat *xstats, unsigned int n);
static int ice_xstats_get_names(struct rte_eth_dev *dev,
				struct rte_eth_xstat_name *xstats_names,
				unsigned int limit);
static const struct rte_pci_id pci_id_ice_map[] = {
	{ RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E810C_BACKPLANE) },
	{ RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E810C_QSFP) },
	{ RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E810C_SFP) },
	{ .vendor_id = 0, /* sentinel */ },
};
static const struct eth_dev_ops ice_eth_dev_ops = {
	.dev_configure = ice_dev_configure,
	.dev_start = ice_dev_start,
	.dev_stop = ice_dev_stop,
	.dev_close = ice_dev_close,
	.dev_reset = ice_dev_reset,
	.rx_queue_start = ice_rx_queue_start,
	.rx_queue_stop = ice_rx_queue_stop,
	.tx_queue_start = ice_tx_queue_start,
	.tx_queue_stop = ice_tx_queue_stop,
	.rx_queue_setup = ice_rx_queue_setup,
	.rx_queue_release = ice_rx_queue_release,
	.tx_queue_setup = ice_tx_queue_setup,
	.tx_queue_release = ice_tx_queue_release,
	.dev_infos_get = ice_dev_info_get,
	.dev_supported_ptypes_get = ice_dev_supported_ptypes_get,
	.link_update = ice_link_update,
	.mtu_set = ice_mtu_set,
	.mac_addr_set = ice_macaddr_set,
	.mac_addr_add = ice_macaddr_add,
	.mac_addr_remove = ice_macaddr_remove,
	.vlan_filter_set = ice_vlan_filter_set,
	.vlan_offload_set = ice_vlan_offload_set,
	.vlan_tpid_set = ice_vlan_tpid_set,
	.reta_update = ice_rss_reta_update,
	.reta_query = ice_rss_reta_query,
	.rss_hash_update = ice_rss_hash_update,
	.rss_hash_conf_get = ice_rss_hash_conf_get,
	.promiscuous_enable = ice_promisc_enable,
	.promiscuous_disable = ice_promisc_disable,
	.allmulticast_enable = ice_allmulti_enable,
	.allmulticast_disable = ice_allmulti_disable,
	.rx_queue_intr_enable = ice_rx_queue_intr_enable,
	.rx_queue_intr_disable = ice_rx_queue_intr_disable,
	.fw_version_get = ice_fw_version_get,
	.vlan_pvid_set = ice_vlan_pvid_set,
	.rxq_info_get = ice_rxq_info_get,
	.txq_info_get = ice_txq_info_get,
	.get_eeprom_length = ice_get_eeprom_length,
	.get_eeprom = ice_get_eeprom,
	.rx_queue_count = ice_rx_queue_count,
	.rx_descriptor_status = ice_rx_descriptor_status,
	.tx_descriptor_status = ice_tx_descriptor_status,
	.stats_get = ice_stats_get,
	.stats_reset = ice_stats_reset,
	.xstats_get = ice_xstats_get,
	.xstats_get_names = ice_xstats_get_names,
	.xstats_reset = ice_stats_reset,
};
/* store statistics names and their offsets in the stats structure */
struct ice_xstats_name_off {
	char name[RTE_ETH_XSTATS_NAME_SIZE];
	unsigned int offset;
};

static const struct ice_xstats_name_off ice_stats_strings[] = {
	{"rx_unicast_packets", offsetof(struct ice_eth_stats, rx_unicast)},
	{"rx_multicast_packets", offsetof(struct ice_eth_stats, rx_multicast)},
	{"rx_broadcast_packets", offsetof(struct ice_eth_stats, rx_broadcast)},
	{"rx_dropped", offsetof(struct ice_eth_stats, rx_discards)},
	{"rx_unknown_protocol_packets", offsetof(struct ice_eth_stats,
		rx_unknown_protocol)},
	{"tx_unicast_packets", offsetof(struct ice_eth_stats, tx_unicast)},
	{"tx_multicast_packets", offsetof(struct ice_eth_stats, tx_multicast)},
	{"tx_broadcast_packets", offsetof(struct ice_eth_stats, tx_broadcast)},
	{"tx_dropped", offsetof(struct ice_eth_stats, tx_discards)},
};

#define ICE_NB_ETH_XSTATS (sizeof(ice_stats_strings) / \
		sizeof(ice_stats_strings[0]))
static const struct ice_xstats_name_off ice_hw_port_strings[] = {
	{"tx_link_down_dropped", offsetof(struct ice_hw_port_stats,
		tx_dropped_link_down)},
	{"rx_crc_errors", offsetof(struct ice_hw_port_stats, crc_errors)},
	{"rx_illegal_byte_errors", offsetof(struct ice_hw_port_stats,
		illegal_bytes)},
	{"rx_error_bytes", offsetof(struct ice_hw_port_stats, error_bytes)},
	{"mac_local_errors", offsetof(struct ice_hw_port_stats,
		mac_local_faults)},
	{"mac_remote_errors", offsetof(struct ice_hw_port_stats,
		mac_remote_faults)},
	{"rx_len_errors", offsetof(struct ice_hw_port_stats,
		rx_len_errors)},
	{"tx_xon_packets", offsetof(struct ice_hw_port_stats, link_xon_tx)},
	{"rx_xon_packets", offsetof(struct ice_hw_port_stats, link_xon_rx)},
	{"tx_xoff_packets", offsetof(struct ice_hw_port_stats, link_xoff_tx)},
	{"rx_xoff_packets", offsetof(struct ice_hw_port_stats, link_xoff_rx)},
	{"rx_size_64_packets", offsetof(struct ice_hw_port_stats, rx_size_64)},
	{"rx_size_65_to_127_packets", offsetof(struct ice_hw_port_stats,
		rx_size_127)},
	{"rx_size_128_to_255_packets", offsetof(struct ice_hw_port_stats,
		rx_size_255)},
	{"rx_size_256_to_511_packets", offsetof(struct ice_hw_port_stats,
		rx_size_511)},
	{"rx_size_512_to_1023_packets", offsetof(struct ice_hw_port_stats,
		rx_size_1023)},
	{"rx_size_1024_to_1522_packets", offsetof(struct ice_hw_port_stats,
		rx_size_1522)},
	{"rx_size_1523_to_max_packets", offsetof(struct ice_hw_port_stats,
		rx_size_big)},
	{"rx_undersized_errors", offsetof(struct ice_hw_port_stats,
		rx_undersize)},
	{"rx_oversize_errors", offsetof(struct ice_hw_port_stats,
		rx_oversize)},
	{"rx_mac_short_pkt_dropped", offsetof(struct ice_hw_port_stats,
		mac_short_pkt_dropped)},
	{"rx_fragmented_errors", offsetof(struct ice_hw_port_stats,
		rx_fragments)},
	{"rx_jabber_errors", offsetof(struct ice_hw_port_stats, rx_jabber)},
	{"tx_size_64_packets", offsetof(struct ice_hw_port_stats, tx_size_64)},
	{"tx_size_65_to_127_packets", offsetof(struct ice_hw_port_stats,
		tx_size_127)},
	{"tx_size_128_to_255_packets", offsetof(struct ice_hw_port_stats,
		tx_size_255)},
	{"tx_size_256_to_511_packets", offsetof(struct ice_hw_port_stats,
		tx_size_511)},
	{"tx_size_512_to_1023_packets", offsetof(struct ice_hw_port_stats,
		tx_size_1023)},
	{"tx_size_1024_to_1522_packets", offsetof(struct ice_hw_port_stats,
		tx_size_1522)},
	{"tx_size_1523_to_max_packets", offsetof(struct ice_hw_port_stats,
		tx_size_big)},
};

#define ICE_NB_HW_PORT_XSTATS (sizeof(ice_hw_port_strings) / \
		sizeof(ice_hw_port_strings[0]))
static void
ice_init_controlq_parameter(struct ice_hw *hw)
{
	/* fields for adminq */
	hw->adminq.num_rq_entries = ICE_ADMINQ_LEN;
	hw->adminq.num_sq_entries = ICE_ADMINQ_LEN;
	hw->adminq.rq_buf_size = ICE_ADMINQ_BUF_SZ;
	hw->adminq.sq_buf_size = ICE_ADMINQ_BUF_SZ;

	/* fields for mailboxq, DPDK used as PF host */
	hw->mailboxq.num_rq_entries = ICE_MAILBOXQ_LEN;
	hw->mailboxq.num_sq_entries = ICE_MAILBOXQ_LEN;
	hw->mailboxq.rq_buf_size = ICE_MAILBOXQ_BUF_SZ;
	hw->mailboxq.sq_buf_size = ICE_MAILBOXQ_BUF_SZ;
}
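/* rte_kvargs_process() callback: validate the "max_queue_pair_num" devarg value */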
static int
ice_check_qp_num(const char *key, const char *qp_value,
		 __rte_unused void *opaque)
{
	int num = 0;
	char *end = NULL;

	while (isblank(*qp_value))
		qp_value++;

	num = strtoul(qp_value, &end, 10);

	if (!num || (*end == '-') || errno) {
		PMD_DRV_LOG(WARNING, "invalid value:\"%s\" for key:\"%s\", "
static int
ice_config_max_queue_pair_num(struct rte_devargs *devargs)
{
	struct rte_kvargs *kvlist;
	const char *queue_num_key = ICE_MAX_QP_NUM;
	int ret;

	kvlist = rte_kvargs_parse(devargs->args, NULL);

	if (!rte_kvargs_count(kvlist, queue_num_key)) {
		rte_kvargs_free(kvlist);
		return 0;
	}

	if (rte_kvargs_process(kvlist, queue_num_key,
			       ice_check_qp_num, NULL) < 0) {
		rte_kvargs_free(kvlist);
		return 0;
	}
	ret = rte_kvargs_process(kvlist, queue_num_key,
				 ice_check_qp_num, NULL);
	rte_kvargs_free(kvlist);

	return ret;
}
static int
ice_res_pool_init(struct ice_res_pool_info *pool, uint32_t base,
		  uint32_t num)
{
	struct pool_entry *entry;

	entry = rte_zmalloc(NULL, sizeof(*entry), 0);
	if (!entry) {
		PMD_INIT_LOG(ERR,
			     "Failed to allocate memory for resource pool");
		return -ENOMEM;
	}

	/* queue heap initialize */
	pool->num_free = num;
	LIST_INIT(&pool->alloc_list);
	LIST_INIT(&pool->free_list);

	/* Initialize element */
	LIST_INSERT_HEAD(&pool->free_list, entry, next);

	return 0;
}
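/* Allocate 'num' contiguous entries from the resource pool (best-fit lookup in free_list) */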
static int
ice_res_pool_alloc(struct ice_res_pool_info *pool,
		   uint16_t num)
{
	struct pool_entry *entry, *valid_entry;

	if (!pool || !num) {
		PMD_INIT_LOG(ERR, "Invalid parameter");
		return -EINVAL;
	}

	if (pool->num_free < num) {
		PMD_INIT_LOG(ERR, "No resource. ask:%u, available:%u",
			     num, pool->num_free);
		return -ENOMEM;
	}

	/* Lookup in free list and find most fit one */
	valid_entry = NULL;
	LIST_FOREACH(entry, &pool->free_list, next) {
		if (entry->len >= num) {
			if (entry->len == num) {
				valid_entry = entry;
				break;
			}
			if (!valid_entry ||
			    valid_entry->len > entry->len)
				valid_entry = entry;
		}
	}

	/* Not find one to satisfy the request, return */
	if (!valid_entry) {
		PMD_INIT_LOG(ERR, "No valid entry found");
		return -ENOMEM;
	}

	/* The entry have equal queue number as requested,
	 * remove it from alloc_list.
	 */
	if (valid_entry->len == num) {
		LIST_REMOVE(valid_entry, next);
	} else {
		/* The entry have more numbers than requested,
		 * create a new entry for alloc_list and minus its
		 * queue base and number in free_list.
		 */
		entry = rte_zmalloc(NULL, sizeof(*entry), 0);
		if (!entry) {
			PMD_INIT_LOG(ERR,
				     "Failed to allocate memory for "
				     "resource pool");
			return -ENOMEM;
		}
		entry->base = valid_entry->base;
		entry->len = num;
		valid_entry->base += num;
		valid_entry->len -= num;
		valid_entry = entry;
	}

	/* Insert it into alloc list, not sorted */
	LIST_INSERT_HEAD(&pool->alloc_list, valid_entry, next);

	pool->num_free -= valid_entry->len;
	pool->num_alloc += valid_entry->len;

	return valid_entry->base + pool->base;
}
static void
ice_res_pool_destroy(struct ice_res_pool_info *pool)
{
	struct pool_entry *entry, *next_entry;

	for (entry = LIST_FIRST(&pool->alloc_list);
	     entry && (next_entry = LIST_NEXT(entry, next), 1);
	     entry = next_entry) {
		LIST_REMOVE(entry, next);
		rte_free(entry);
	}

	for (entry = LIST_FIRST(&pool->free_list);
	     entry && (next_entry = LIST_NEXT(entry, next), 1);
	     entry = next_entry) {
		LIST_REMOVE(entry, next);
		rte_free(entry);
	}

	LIST_INIT(&pool->alloc_list);
	LIST_INIT(&pool->free_list);
}
static void
ice_vsi_config_default_rss(struct ice_aqc_vsi_props *info)
{
	/* Set VSI LUT selection */
	info->q_opt_rss = ICE_AQ_VSI_Q_OPT_RSS_LUT_VSI &
			  ICE_AQ_VSI_Q_OPT_RSS_LUT_M;
	/* Set Hash scheme */
	info->q_opt_rss |= ICE_AQ_VSI_Q_OPT_RSS_TPLZ &
			   ICE_AQ_VSI_Q_OPT_RSS_HASH_M;

	info->q_opt_tc = ICE_AQ_VSI_Q_OPT_TC_OVR_M;
}
static enum ice_status
ice_vsi_config_tc_queue_mapping(struct ice_vsi *vsi,
				struct ice_aqc_vsi_props *info,
				uint8_t enabled_tcmap)
{
	uint16_t bsf, qp_idx;

	/* default tc 0 now. Multi-TC supporting need to be done later.
	 * Configure TC and queue mapping parameters, for enabled TC,
	 * allocate qpnum_per_tc queues to this traffic.
	 */
	if (enabled_tcmap != 0x01) {
		PMD_INIT_LOG(ERR, "only TC0 is supported");
		return -ENOTSUP;
	}

	vsi->nb_qps = RTE_MIN(vsi->nb_qps, ICE_MAX_Q_PER_TC);
	bsf = rte_bsf32(vsi->nb_qps);
	/* Adjust the queue number to actual queues that can be applied */
	vsi->nb_qps = 0x1 << bsf;

	qp_idx = 0;
	/* Set tc and queue mapping with VSI */
	info->tc_mapping[0] = rte_cpu_to_le_16((qp_idx <<
						ICE_AQ_VSI_TC_Q_OFFSET_S) |
					       (bsf << ICE_AQ_VSI_TC_Q_NUM_S));

	/* Associate queue number with VSI */
	info->mapping_flags |= rte_cpu_to_le_16(ICE_AQ_VSI_Q_MAP_CONTIG);
	info->q_mapping[0] = rte_cpu_to_le_16(vsi->base_queue);
	info->q_mapping[1] = rte_cpu_to_le_16(vsi->nb_qps);
	info->valid_sections |=
		rte_cpu_to_le_16(ICE_AQ_VSI_PROP_RXQ_MAP_VALID);
	/* Set the info.ingress_table and info.egress_table
	 * for UP translate table. Now just set it to 1:1 map by default
	 * -- 0b 111 110 101 100 011 010 001 000 == 0xFAC688
	 */
#define ICE_TC_QUEUE_TABLE_DFLT 0x00FAC688
	info->ingress_table = rte_cpu_to_le_32(ICE_TC_QUEUE_TABLE_DFLT);
	info->egress_table = rte_cpu_to_le_32(ICE_TC_QUEUE_TABLE_DFLT);
	info->outer_up_table = rte_cpu_to_le_32(ICE_TC_QUEUE_TABLE_DFLT);

	return 0;
}
static int
ice_init_mac_address(struct rte_eth_dev *dev)
{
	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	if (!is_unicast_ether_addr
		((struct ether_addr *)hw->port_info[0].mac.lan_addr)) {
		PMD_INIT_LOG(ERR, "Invalid MAC address");
		return -EINVAL;
	}

	ether_addr_copy((struct ether_addr *)hw->port_info[0].mac.lan_addr,
			(struct ether_addr *)hw->port_info[0].mac.perm_addr);

	dev->data->mac_addrs = rte_zmalloc(NULL, sizeof(struct ether_addr), 0);
	if (!dev->data->mac_addrs) {
		PMD_INIT_LOG(ERR,
			     "Failed to allocate memory to store mac address");
		return -ENOMEM;
	}
	/* store it to dev data */
	ether_addr_copy((struct ether_addr *)hw->port_info[0].mac.perm_addr,
			&dev->data->mac_addrs[0]);

	return 0;
}
/* Find out specific MAC filter */
static struct ice_mac_filter *
ice_find_mac_filter(struct ice_vsi *vsi, struct ether_addr *macaddr)
{
	struct ice_mac_filter *f;

	TAILQ_FOREACH(f, &vsi->mac_list, next) {
		if (is_same_ether_addr(macaddr, &f->mac_info.mac_addr))
			return f;
	}

	return NULL;
}
static int
ice_add_mac_filter(struct ice_vsi *vsi, struct ether_addr *mac_addr)
{
	struct ice_fltr_list_entry *m_list_itr = NULL;
	struct ice_mac_filter *f;
	struct LIST_HEAD_TYPE list_head;
	struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
	int ret = 0;

	/* If it's added and configured, return */
	f = ice_find_mac_filter(vsi, mac_addr);
	if (f) {
		PMD_DRV_LOG(INFO, "This MAC filter already exists.");
		return 0;
	}

	INIT_LIST_HEAD(&list_head);

	m_list_itr = (struct ice_fltr_list_entry *)
		ice_malloc(hw, sizeof(*m_list_itr));
	if (!m_list_itr) {
		ret = -ENOMEM;
		goto DONE;
	}
	ice_memcpy(m_list_itr->fltr_info.l_data.mac.mac_addr,
		   mac_addr, ETH_ALEN, ICE_NONDMA_TO_NONDMA);
	m_list_itr->fltr_info.src_id = ICE_SRC_ID_VSI;
	m_list_itr->fltr_info.fltr_act = ICE_FWD_TO_VSI;
	m_list_itr->fltr_info.lkup_type = ICE_SW_LKUP_MAC;
	m_list_itr->fltr_info.flag = ICE_FLTR_TX;
	m_list_itr->fltr_info.vsi_handle = vsi->idx;

	LIST_ADD(&m_list_itr->list_entry, &list_head);

	ret = ice_add_mac(hw, &list_head);
	if (ret != ICE_SUCCESS) {
		PMD_DRV_LOG(ERR, "Failed to add MAC filter");
		goto DONE;
	}

	/* Add the mac addr into mac list */
	f = rte_zmalloc(NULL, sizeof(*f), 0);
	if (!f) {
		PMD_DRV_LOG(ERR, "failed to allocate memory");
		goto DONE;
	}
	rte_memcpy(&f->mac_info.mac_addr, mac_addr, ETH_ADDR_LEN);
	TAILQ_INSERT_TAIL(&vsi->mac_list, f, next);

DONE:
	rte_free(m_list_itr);
	return ret;
}
static int
ice_remove_mac_filter(struct ice_vsi *vsi, struct ether_addr *mac_addr)
{
	struct ice_fltr_list_entry *m_list_itr = NULL;
	struct ice_mac_filter *f;
	struct LIST_HEAD_TYPE list_head;
	struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
	int ret = 0;

	/* Can't find it, return an error */
	f = ice_find_mac_filter(vsi, mac_addr);
	if (!f)
		return -ENOENT;

	INIT_LIST_HEAD(&list_head);

	m_list_itr = (struct ice_fltr_list_entry *)
		ice_malloc(hw, sizeof(*m_list_itr));
	if (!m_list_itr) {
		ret = -ENOMEM;
		goto DONE;
	}
	ice_memcpy(m_list_itr->fltr_info.l_data.mac.mac_addr,
		   mac_addr, ETH_ALEN, ICE_NONDMA_TO_NONDMA);
	m_list_itr->fltr_info.src_id = ICE_SRC_ID_VSI;
	m_list_itr->fltr_info.fltr_act = ICE_FWD_TO_VSI;
	m_list_itr->fltr_info.lkup_type = ICE_SW_LKUP_MAC;
	m_list_itr->fltr_info.flag = ICE_FLTR_TX;
	m_list_itr->fltr_info.vsi_handle = vsi->idx;

	LIST_ADD(&m_list_itr->list_entry, &list_head);

	/* remove the mac filter */
	ret = ice_remove_mac(hw, &list_head);
	if (ret != ICE_SUCCESS) {
		PMD_DRV_LOG(ERR, "Failed to remove MAC filter");
		goto DONE;
	}

	/* Remove the mac addr from mac list */
	TAILQ_REMOVE(&vsi->mac_list, f, next);
	rte_free(f);

DONE:
	rte_free(m_list_itr);
	return ret;
}
/* Find out specific VLAN filter */
static struct ice_vlan_filter *
ice_find_vlan_filter(struct ice_vsi *vsi, uint16_t vlan_id)
{
	struct ice_vlan_filter *f;

	TAILQ_FOREACH(f, &vsi->vlan_list, next) {
		if (vlan_id == f->vlan_info.vlan_id)
			return f;
	}

	return NULL;
}
static int
ice_add_vlan_filter(struct ice_vsi *vsi, uint16_t vlan_id)
{
	struct ice_fltr_list_entry *v_list_itr = NULL;
	struct ice_vlan_filter *f;
	struct LIST_HEAD_TYPE list_head;
	struct ice_hw *hw;
	int ret = 0;

	if (!vsi || vlan_id > ETHER_MAX_VLAN_ID)
		return -EINVAL;

	hw = ICE_VSI_TO_HW(vsi);

	/* If it's added and configured, return. */
	f = ice_find_vlan_filter(vsi, vlan_id);
	if (f) {
		PMD_DRV_LOG(INFO, "This VLAN filter already exists.");
		return 0;
	}

	if (!vsi->vlan_anti_spoof_on && !vsi->vlan_filter_on)
		return 0;

	INIT_LIST_HEAD(&list_head);

	v_list_itr = (struct ice_fltr_list_entry *)
		ice_malloc(hw, sizeof(*v_list_itr));
	if (!v_list_itr) {
		ret = -ENOMEM;
		goto DONE;
	}
	v_list_itr->fltr_info.l_data.vlan.vlan_id = vlan_id;
	v_list_itr->fltr_info.src_id = ICE_SRC_ID_VSI;
	v_list_itr->fltr_info.fltr_act = ICE_FWD_TO_VSI;
	v_list_itr->fltr_info.lkup_type = ICE_SW_LKUP_VLAN;
	v_list_itr->fltr_info.flag = ICE_FLTR_TX;
	v_list_itr->fltr_info.vsi_handle = vsi->idx;

	LIST_ADD(&v_list_itr->list_entry, &list_head);

	ret = ice_add_vlan(hw, &list_head);
	if (ret != ICE_SUCCESS) {
		PMD_DRV_LOG(ERR, "Failed to add VLAN filter");
		goto DONE;
	}

	/* Add vlan into vlan list */
	f = rte_zmalloc(NULL, sizeof(*f), 0);
	if (!f) {
		PMD_DRV_LOG(ERR, "failed to allocate memory");
		goto DONE;
	}
	f->vlan_info.vlan_id = vlan_id;
	TAILQ_INSERT_TAIL(&vsi->vlan_list, f, next);

DONE:
	rte_free(v_list_itr);
	return ret;
}
static int
ice_remove_vlan_filter(struct ice_vsi *vsi, uint16_t vlan_id)
{
	struct ice_fltr_list_entry *v_list_itr = NULL;
	struct ice_vlan_filter *f;
	struct LIST_HEAD_TYPE list_head;
	struct ice_hw *hw;
	int ret = 0;

	/**
	 * Vlan 0 is the generic filter for untagged packets
	 * and can't be removed.
	 */
	if (!vsi || vlan_id == 0 || vlan_id > ETHER_MAX_VLAN_ID)
		return -EINVAL;

	hw = ICE_VSI_TO_HW(vsi);

	/* Can't find it, return an error */
	f = ice_find_vlan_filter(vsi, vlan_id);
	if (!f)
		return -ENOENT;

	INIT_LIST_HEAD(&list_head);

	v_list_itr = (struct ice_fltr_list_entry *)
		ice_malloc(hw, sizeof(*v_list_itr));
	if (!v_list_itr) {
		ret = -ENOMEM;
		goto DONE;
	}
	v_list_itr->fltr_info.l_data.vlan.vlan_id = vlan_id;
	v_list_itr->fltr_info.src_id = ICE_SRC_ID_VSI;
	v_list_itr->fltr_info.fltr_act = ICE_FWD_TO_VSI;
	v_list_itr->fltr_info.lkup_type = ICE_SW_LKUP_VLAN;
	v_list_itr->fltr_info.flag = ICE_FLTR_TX;
	v_list_itr->fltr_info.vsi_handle = vsi->idx;

	LIST_ADD(&v_list_itr->list_entry, &list_head);

	/* remove the vlan filter */
	ret = ice_remove_vlan(hw, &list_head);
	if (ret != ICE_SUCCESS) {
		PMD_DRV_LOG(ERR, "Failed to remove VLAN filter");
		goto DONE;
	}

	/* Remove the vlan id from vlan list */
	TAILQ_REMOVE(&vsi->vlan_list, f, next);
	rte_free(f);

DONE:
	rte_free(v_list_itr);
	return ret;
}
static int
ice_remove_all_mac_vlan_filters(struct ice_vsi *vsi)
{
	struct ice_mac_filter *m_f;
	struct ice_vlan_filter *v_f;
	int ret = 0;

	if (!vsi || !vsi->mac_num)
		return -EINVAL;

	TAILQ_FOREACH(m_f, &vsi->mac_list, next) {
		ret = ice_remove_mac_filter(vsi, &m_f->mac_info.mac_addr);
		if (ret != ICE_SUCCESS) {
			ret = -EINVAL;
			goto DONE;
		}
	}

	if (vsi->vlan_num == 0)
		return 0;

	TAILQ_FOREACH(v_f, &vsi->vlan_list, next) {
		ret = ice_remove_vlan_filter(vsi, v_f->vlan_info.vlan_id);
		if (ret != ICE_SUCCESS) {
			ret = -EINVAL;
			goto DONE;
		}
	}

DONE:
	return ret;
}
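/* Enable or disable outer-VLAN (QinQ) tag insertion on the VSI through an update-VSI AdminQ command */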
static int
ice_vsi_config_qinq_insertion(struct ice_vsi *vsi, bool on)
{
	struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
	struct ice_vsi_ctx ctxt;
	uint8_t qinq_flags = 0;
	int ret = 0;

	/* Check if it has been already on or off */
	if (vsi->info.valid_sections &
		rte_cpu_to_le_16(ICE_AQ_VSI_PROP_OUTER_TAG_VALID)) {
		if (on) {
			if ((vsi->info.outer_tag_flags &
			     ICE_AQ_VSI_OUTER_TAG_ACCEPT_HOST) ==
			    ICE_AQ_VSI_OUTER_TAG_ACCEPT_HOST)
				return 0; /* already on */
		} else {
			if (!(vsi->info.outer_tag_flags &
			      ICE_AQ_VSI_OUTER_TAG_ACCEPT_HOST))
				return 0; /* already off */
		}
	}

	if (on)
		qinq_flags = ICE_AQ_VSI_OUTER_TAG_ACCEPT_HOST;
	/* clear global insertion and use per packet insertion */
	vsi->info.outer_tag_flags &= ~(ICE_AQ_VSI_OUTER_TAG_INSERT);
	vsi->info.outer_tag_flags &= ~(ICE_AQ_VSI_OUTER_TAG_ACCEPT_HOST);
	vsi->info.outer_tag_flags |= qinq_flags;
	/* use default vlan type 0x8100 */
	vsi->info.outer_tag_flags &= ~(ICE_AQ_VSI_OUTER_TAG_TYPE_M);
	vsi->info.outer_tag_flags |= ICE_DFLT_OUTER_TAG_TYPE <<
				     ICE_AQ_VSI_OUTER_TAG_TYPE_S;
	(void)rte_memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
	ctxt.info.valid_sections =
		rte_cpu_to_le_16(ICE_AQ_VSI_PROP_OUTER_TAG_VALID);
	ctxt.vsi_num = vsi->vsi_id;
	ret = ice_update_vsi(hw, vsi->idx, &ctxt, NULL);
	if (ret) {
		PMD_DRV_LOG(INFO,
			    "Update VSI failed to %s qinq stripping",
			    on ? "enable" : "disable");
		return -EINVAL;
	}

	vsi->info.valid_sections |=
		rte_cpu_to_le_16(ICE_AQ_VSI_PROP_OUTER_TAG_VALID);

	return ret;
}
static int
ice_vsi_config_qinq_stripping(struct ice_vsi *vsi, bool on)
{
	struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
	struct ice_vsi_ctx ctxt;
	uint8_t qinq_flags = 0;
	int ret = 0;

	/* Check if it has been already on or off */
	if (vsi->info.valid_sections &
		rte_cpu_to_le_16(ICE_AQ_VSI_PROP_OUTER_TAG_VALID)) {
		if (on) {
			if ((vsi->info.outer_tag_flags &
			     ICE_AQ_VSI_OUTER_TAG_MODE_M) ==
			    ICE_AQ_VSI_OUTER_TAG_COPY)
				return 0; /* already on */
		} else {
			if ((vsi->info.outer_tag_flags &
			     ICE_AQ_VSI_OUTER_TAG_MODE_M) ==
			    ICE_AQ_VSI_OUTER_TAG_NOTHING)
				return 0; /* already off */
		}
	}

	if (on)
		qinq_flags = ICE_AQ_VSI_OUTER_TAG_COPY;
	else
		qinq_flags = ICE_AQ_VSI_OUTER_TAG_NOTHING;
	vsi->info.outer_tag_flags &= ~(ICE_AQ_VSI_OUTER_TAG_MODE_M);
	vsi->info.outer_tag_flags |= qinq_flags;
	/* use default vlan type 0x8100 */
	vsi->info.outer_tag_flags &= ~(ICE_AQ_VSI_OUTER_TAG_TYPE_M);
	vsi->info.outer_tag_flags |= ICE_DFLT_OUTER_TAG_TYPE <<
				     ICE_AQ_VSI_OUTER_TAG_TYPE_S;
	(void)rte_memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
	ctxt.info.valid_sections =
		rte_cpu_to_le_16(ICE_AQ_VSI_PROP_OUTER_TAG_VALID);
	ctxt.vsi_num = vsi->vsi_id;
	ret = ice_update_vsi(hw, vsi->idx, &ctxt, NULL);
	if (ret) {
		PMD_DRV_LOG(INFO,
			    "Update VSI failed to %s qinq stripping",
			    on ? "enable" : "disable");
		return -EINVAL;
	}

	vsi->info.valid_sections |=
		rte_cpu_to_le_16(ICE_AQ_VSI_PROP_OUTER_TAG_VALID);

	return ret;
}
static int
ice_vsi_config_double_vlan(struct ice_vsi *vsi, int on)
{
	int ret;

	ret = ice_vsi_config_qinq_stripping(vsi, on);
	if (ret)
		PMD_DRV_LOG(ERR, "Fail to set qinq stripping - %d", ret);

	ret = ice_vsi_config_qinq_insertion(vsi, on);
	if (ret)
		PMD_DRV_LOG(ERR, "Fail to set qinq insertion - %d", ret);

	return ret;
}
static void
ice_pf_enable_irq0(struct ice_hw *hw)
{
	/* reset the registers */
	ICE_WRITE_REG(hw, PFINT_OICR_ENA, 0);
	ICE_READ_REG(hw, PFINT_OICR);

	ICE_WRITE_REG(hw, PFINT_OICR_ENA,
		      (uint32_t)(PFINT_OICR_ENA_INT_ENA_M &
				 (~PFINT_OICR_LINK_STAT_CHANGE_M)));

	ICE_WRITE_REG(hw, PFINT_OICR_CTL,
		      (0 & PFINT_OICR_CTL_MSIX_INDX_M) |
		      ((0 << PFINT_OICR_CTL_ITR_INDX_S) &
		       PFINT_OICR_CTL_ITR_INDX_M) |
		      PFINT_OICR_CTL_CAUSE_ENA_M);

	ICE_WRITE_REG(hw, PFINT_FW_CTL,
		      (0 & PFINT_FW_CTL_MSIX_INDX_M) |
		      ((0 << PFINT_FW_CTL_ITR_INDX_S) &
		       PFINT_FW_CTL_ITR_INDX_M) |
		      PFINT_FW_CTL_CAUSE_ENA_M);

	ICE_WRITE_REG(hw, PFINT_OICR_ENA, PFINT_OICR_ENA_INT_ENA_M);

	ICE_WRITE_REG(hw, GLINT_DYN_CTL(0),
		      GLINT_DYN_CTL_INTENA_M |
		      GLINT_DYN_CTL_CLEARPBA_M |
		      GLINT_DYN_CTL_ITR_INDX_M);
}

static void
ice_pf_disable_irq0(struct ice_hw *hw)
{
	/* Disable all interrupt types */
	ICE_WRITE_REG(hw, GLINT_DYN_CTL(0), GLINT_DYN_CTL_WB_ON_ITR_M);
}
static void
ice_handle_aq_msg(struct rte_eth_dev *dev)
{
	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct ice_ctl_q_info *cq = &hw->adminq;
	struct ice_rq_event_info event;
	uint16_t pending, opcode;
	int ret;

	event.buf_len = ICE_AQ_MAX_BUF_LEN;
	event.msg_buf = rte_zmalloc(NULL, event.buf_len, 0);
	if (!event.msg_buf) {
		PMD_DRV_LOG(ERR, "Failed to allocate mem");
		return;
	}

	pending = 1;
	while (pending) {
		ret = ice_clean_rq_elem(hw, cq, &event, &pending);

		if (ret != ICE_SUCCESS) {
			PMD_DRV_LOG(INFO,
				    "Failed to read msg from AdminQ, "
				    "adminq_err: %u",
				    hw->adminq.sq_last_status);
			break;
		}
		opcode = rte_le_to_cpu_16(event.desc.opcode);

		switch (opcode) {
		case ice_aqc_opc_get_link_status:
			ret = ice_link_update(dev, 0);
			if (!ret)
				_rte_eth_dev_callback_process
					(dev, RTE_ETH_EVENT_INTR_LSC, NULL);
			break;
		default:
			PMD_DRV_LOG(DEBUG, "Request %u is not supported yet",
				    opcode);
			break;
		}
	}
	rte_free(event.msg_buf);
}
/**
 * Interrupt handler triggered by NIC for handling
 * specific interrupt.
 *
 * @param handle
 *  Pointer to interrupt handle.
 * @param param
 *  The address of parameter (struct rte_eth_dev *) registered before.
 *
 * @return
 *  void
 */
static void
ice_interrupt_handler(void *param)
{
	struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint32_t oicr;
	uint32_t reg;
	uint8_t pf_num;
	uint8_t event;
	uint16_t queue;
	uint32_t int_fw_ctl;

	/* Disable interrupt */
	ice_pf_disable_irq0(hw);

	/* read out interrupt causes */
	oicr = ICE_READ_REG(hw, PFINT_OICR);
	int_fw_ctl = ICE_READ_REG(hw, PFINT_FW_CTL);

	/* No interrupt event indicated */
	if (!(oicr & PFINT_OICR_INTEVENT_M)) {
		PMD_DRV_LOG(INFO, "No interrupt event");
		goto done;
	}

	if (int_fw_ctl & PFINT_FW_CTL_INTEVENT_M) {
		PMD_DRV_LOG(INFO, "FW_CTL: link state change event");
		ice_handle_aq_msg(dev);
	}
	if (oicr & PFINT_OICR_LINK_STAT_CHANGE_M) {
		PMD_DRV_LOG(INFO, "OICR: link state change event");
		ice_link_update(dev, 0);
	}

	if (oicr & PFINT_OICR_MAL_DETECT_M) {
		PMD_DRV_LOG(WARNING, "OICR: MDD event");
		reg = ICE_READ_REG(hw, GL_MDET_TX_PQM);
		if (reg & GL_MDET_TX_PQM_VALID_M) {
			pf_num = (reg & GL_MDET_TX_PQM_PF_NUM_M) >>
				 GL_MDET_TX_PQM_PF_NUM_S;
			event = (reg & GL_MDET_TX_PQM_MAL_TYPE_M) >>
				GL_MDET_TX_PQM_MAL_TYPE_S;
			queue = (reg & GL_MDET_TX_PQM_QNUM_M) >>
				GL_MDET_TX_PQM_QNUM_S;

			PMD_DRV_LOG(WARNING, "Malicious Driver Detection event "
				    "%d by PQM on TX queue %d PF# %d",
				    event, queue, pf_num);
		}

		reg = ICE_READ_REG(hw, GL_MDET_TX_TCLAN);
		if (reg & GL_MDET_TX_TCLAN_VALID_M) {
			pf_num = (reg & GL_MDET_TX_TCLAN_PF_NUM_M) >>
				 GL_MDET_TX_TCLAN_PF_NUM_S;
			event = (reg & GL_MDET_TX_TCLAN_MAL_TYPE_M) >>
				GL_MDET_TX_TCLAN_MAL_TYPE_S;
			queue = (reg & GL_MDET_TX_TCLAN_QNUM_M) >>
				GL_MDET_TX_TCLAN_QNUM_S;

			PMD_DRV_LOG(WARNING, "Malicious Driver Detection event "
				    "%d by TCLAN on TX queue %d PF# %d",
				    event, queue, pf_num);
		}
	}
done:
	/* Enable interrupt */
	ice_pf_enable_irq0(hw);
	rte_intr_enable(dev->intr_handle);
}
/* Initialize SW parameters of PF */
static int
ice_pf_sw_init(struct rte_eth_dev *dev)
{
	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct ice_hw *hw = ICE_PF_TO_HW(pf);

	if (ice_config_max_queue_pair_num(dev->device->devargs) > 0)
		pf->lan_nb_qp_max =
			ice_config_max_queue_pair_num(dev->device->devargs);
	else
		pf->lan_nb_qp_max =
			(uint16_t)RTE_MIN(hw->func_caps.common_cap.num_txq,
					  hw->func_caps.common_cap.num_rxq);

	pf->lan_nb_qps = pf->lan_nb_qp_max;

	return 0;
}
static struct ice_vsi *
ice_setup_vsi(struct ice_pf *pf, enum ice_vsi_type type)
{
	struct ice_hw *hw = ICE_PF_TO_HW(pf);
	struct ice_vsi *vsi = NULL;
	struct ice_vsi_ctx vsi_ctx;
	int ret;
	struct ether_addr broadcast = {
		.addr_bytes = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff} };
	struct ether_addr mac_addr;
	uint16_t max_txqs[ICE_MAX_TRAFFIC_CLASS] = { 0 };
	uint8_t tc_bitmap = 0x1;

	/* hw->num_lports = 1 in NIC mode */
	vsi = rte_zmalloc(NULL, sizeof(struct ice_vsi), 0);
	if (!vsi)
		return NULL;

	vsi->idx = pf->next_vsi_idx;
	vsi->adapter = ICE_PF_TO_ADAPTER(pf);
	vsi->max_macaddrs = ICE_NUM_MACADDR_MAX;
	vsi->vlan_anti_spoof_on = 0;
	vsi->vlan_filter_on = 1;
	TAILQ_INIT(&vsi->mac_list);
	TAILQ_INIT(&vsi->vlan_list);

	memset(&vsi_ctx, 0, sizeof(vsi_ctx));
	/* base_queue in used in queue mapping of VSI add/update command.
	 * Suppose vsi->base_queue is 0 now, don't consider SRIOV, VMDQ
	 * cases in the first stage. Only Main VSI.
	 */
	vsi->base_queue = 0;
	switch (type) {
	case ICE_VSI_PF:
		vsi->nb_qps = pf->lan_nb_qps;
		ice_vsi_config_default_rss(&vsi_ctx.info);
		vsi_ctx.alloc_from_pool = true;
		vsi_ctx.flags = ICE_AQ_VSI_TYPE_PF;
		/* switch_id is queried by get_switch_config aq, which is done
		 * by ice_init_hw
		 */
		vsi_ctx.info.sw_id = hw->port_info->sw_id;
		vsi_ctx.info.sw_flags2 = ICE_AQ_VSI_SW_FLAG_LAN_ENA;
		/* Allow all untagged or tagged packets */
		vsi_ctx.info.vlan_flags = ICE_AQ_VSI_VLAN_MODE_ALL;
		vsi_ctx.info.vlan_flags |= ICE_AQ_VSI_VLAN_EMOD_NOTHING;
		vsi_ctx.info.q_opt_rss = ICE_AQ_VSI_Q_OPT_RSS_LUT_PF |
					 ICE_AQ_VSI_Q_OPT_RSS_TPLZ;
		/* Enable VLAN/UP trip */
		ret = ice_vsi_config_tc_queue_mapping(vsi,
						      &vsi_ctx.info,
						      ICE_DEFAULT_TCMAP);
		if (ret) {
			PMD_INIT_LOG(ERR,
				     "tc queue mapping with vsi failed, "
				     "err = %d",
				     ret);
			goto fail_mem;
		}

		break;
	default:
		/* for other types of VSI */
		PMD_INIT_LOG(ERR, "other types of VSI not supported");
		goto fail_mem;
	}

	/* VF has MSIX interrupt in VF range, don't allocate here */
	if (type == ICE_VSI_PF) {
		ret = ice_res_pool_alloc(&pf->msix_pool,
					 RTE_MIN(vsi->nb_qps,
						 RTE_MAX_RXTX_INTR_VEC_ID));
		if (ret < 0) {
			PMD_INIT_LOG(ERR, "VSI MAIN %d get heap failed %d",
				     vsi->vsi_id, ret);
		}
		vsi->msix_intr = ret;
		vsi->nb_msix = RTE_MIN(vsi->nb_qps, RTE_MAX_RXTX_INTR_VEC_ID);
	}

	ret = ice_add_vsi(hw, vsi->idx, &vsi_ctx, NULL);
	if (ret != ICE_SUCCESS) {
		PMD_INIT_LOG(ERR, "add vsi failed, err = %d", ret);
		goto fail_mem;
	}
	/* store vsi information in SW structure */
	vsi->vsi_id = vsi_ctx.vsi_num;
	vsi->info = vsi_ctx.info;
	pf->vsis_allocated = vsi_ctx.vsis_allocd;
	pf->vsis_unallocated = vsi_ctx.vsis_unallocated;

	/* MAC configuration */
	rte_memcpy(pf->dev_addr.addr_bytes,
		   hw->port_info->mac.perm_addr,
		   ETH_ADDR_LEN);

	rte_memcpy(&mac_addr, &pf->dev_addr, ETHER_ADDR_LEN);
	ret = ice_add_mac_filter(vsi, &mac_addr);
	if (ret != ICE_SUCCESS)
		PMD_INIT_LOG(ERR, "Failed to add dflt MAC filter");

	rte_memcpy(&mac_addr, &broadcast, ETHER_ADDR_LEN);
	ret = ice_add_mac_filter(vsi, &mac_addr);
	if (ret != ICE_SUCCESS)
		PMD_INIT_LOG(ERR, "Failed to add MAC filter");

	/* At the beginning, only TC0. */
	/* What we need here is the maximum number of the TX queues.
	 * Currently vsi->nb_qps means it.
	 * Correct it if any change.
	 */
	max_txqs[0] = vsi->nb_qps;
	ret = ice_cfg_vsi_lan(hw->port_info, vsi->idx,
			      tc_bitmap, max_txqs);
	if (ret != ICE_SUCCESS)
		PMD_INIT_LOG(ERR, "Failed to config vsi sched");

	return vsi;
fail_mem:
	rte_free(vsi);
	return NULL;
}
static int
ice_send_driver_ver(struct ice_hw *hw)
{
	struct ice_driver_ver dv;

	/* we don't have driver version use 0 for dummy */
	dv.major_ver = 0;
	dv.minor_ver = 0;
	dv.build_ver = 0;
	dv.subbuild_ver = 0;
	strncpy((char *)dv.driver_string, "dpdk", sizeof(dv.driver_string));

	return ice_aq_send_driver_ver(hw, &dv, NULL);
}
static int
ice_pf_setup(struct ice_pf *pf)
{
	struct ice_vsi *vsi;

	/* Clear all stats counters */
	pf->offset_loaded = FALSE;
	memset(&pf->stats, 0, sizeof(struct ice_hw_port_stats));
	memset(&pf->stats_offset, 0, sizeof(struct ice_hw_port_stats));
	memset(&pf->internal_stats, 0, sizeof(struct ice_eth_stats));
	memset(&pf->internal_stats_offset, 0, sizeof(struct ice_eth_stats));

	vsi = ice_setup_vsi(pf, ICE_VSI_PF);
	if (!vsi) {
		PMD_INIT_LOG(ERR, "Failed to add vsi for PF");
		return -EINVAL;
	}

	pf->main_vsi = vsi;

	return 0;
}
static int ice_load_pkg(struct rte_eth_dev *dev)
{
	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	const char *pkg_file = ICE_DFLT_PKG_FILE;
	int err;
	uint8_t *buf;
	int buf_len;
	FILE *file;
	struct stat fstat;

	file = fopen(pkg_file, "rb");
	if (!file) {
		PMD_INIT_LOG(ERR, "failed to open file: %s\n", pkg_file);
		return -1;
	}

	err = stat(pkg_file, &fstat);
	if (err) {
		PMD_INIT_LOG(ERR, "failed to get file stats\n");
		fclose(file);
		return err;
	}

	buf_len = fstat.st_size;
	buf = rte_malloc(NULL, buf_len, 0);
	if (!buf) {
		PMD_INIT_LOG(ERR, "failed to allocate buf of size %d for package\n",
			     buf_len);
		fclose(file);
		return -1;
	}

	err = fread(buf, buf_len, 1, file);
	if (err != 1) {
		PMD_INIT_LOG(ERR, "failed to read package data\n");
		fclose(file);
		err = -1;
		goto fail_exit;
	}

	fclose(file);

	err = ice_copy_and_init_pkg(hw, buf, buf_len);
	if (err) {
		PMD_INIT_LOG(ERR, "ice_copy_and_init_hw failed: %d\n", err);
		goto fail_exit;
	}
	err = ice_init_hw_tbls(hw);
	if (err) {
		PMD_INIT_LOG(ERR, "ice_init_hw_tbls failed: %d\n", err);
		goto fail_init_tbls;
	}

	return 0;

fail_init_tbls:
	rte_free(hw->pkg_copy);
fail_exit:
	rte_free(buf);
	return err;
}
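/* Device init: map device data, initialize HW and control queues, load the DDP package, set up the main VSI and PF interrupts */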
static int
ice_dev_init(struct rte_eth_dev *dev)
{
	struct rte_pci_device *pci_dev;
	struct rte_intr_handle *intr_handle;
	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct ice_adapter *ad =
		ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
	struct ice_vsi *vsi;
	int ret;

	dev->dev_ops = &ice_eth_dev_ops;
	dev->rx_pkt_burst = ice_recv_pkts;
	dev->tx_pkt_burst = ice_xmit_pkts;
	dev->tx_pkt_prepare = ice_prep_pkts;

	ice_set_default_ptype_table(dev);
	pci_dev = RTE_DEV_TO_PCI(dev->device);
	intr_handle = &pci_dev->intr_handle;

	pf->adapter = ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
	pf->adapter->eth_dev = dev;
	pf->dev_data = dev->data;
	hw->back = pf->adapter;
	hw->hw_addr = (uint8_t *)pci_dev->mem_resource[0].addr;
	hw->vendor_id = pci_dev->id.vendor_id;
	hw->device_id = pci_dev->id.device_id;
	hw->subsystem_vendor_id = pci_dev->id.subsystem_vendor_id;
	hw->subsystem_device_id = pci_dev->id.subsystem_device_id;
	hw->bus.device = pci_dev->addr.devid;
	hw->bus.func = pci_dev->addr.function;

	ice_init_controlq_parameter(hw);

	ret = ice_init_hw(hw);
	if (ret) {
		PMD_INIT_LOG(ERR, "Failed to initialize HW");
		return -EINVAL;
	}

	ret = ice_load_pkg(dev);
	if (ret) {
		PMD_INIT_LOG(WARNING, "Failed to load the DDP package,"
			     "Entering Safe Mode");
		ad->is_safe_mode = 1;
	}

	PMD_INIT_LOG(INFO, "FW %d.%d.%05d API %d.%d",
		     hw->fw_maj_ver, hw->fw_min_ver, hw->fw_build,
		     hw->api_maj_ver, hw->api_min_ver);

	ice_pf_sw_init(dev);
	ret = ice_init_mac_address(dev);
	if (ret) {
		PMD_INIT_LOG(ERR, "Failed to initialize mac address");
		goto err_init_mac;
	}

	ret = ice_res_pool_init(&pf->msix_pool, 1,
				hw->func_caps.common_cap.num_msix_vectors - 1);
	if (ret) {
		PMD_INIT_LOG(ERR, "Failed to init MSIX pool");
		goto err_msix_pool_init;
	}

	ret = ice_pf_setup(pf);
	if (ret) {
		PMD_INIT_LOG(ERR, "Failed to setup PF");
		goto err_pf_setup;
	}

	ret = ice_send_driver_ver(hw);
	if (ret) {
		PMD_INIT_LOG(ERR, "Failed to send driver version");
		goto err_pf_setup;
	}

	vsi = pf->main_vsi;

	/* Disable double vlan by default */
	ice_vsi_config_double_vlan(vsi, FALSE);

	ret = ice_aq_stop_lldp(hw, TRUE, NULL);
	if (ret != ICE_SUCCESS)
		PMD_INIT_LOG(DEBUG, "lldp has already stopped\n");

	/* register callback func to eal lib */
	rte_intr_callback_register(intr_handle,
				   ice_interrupt_handler, dev);

	ice_pf_enable_irq0(hw);

	/* enable uio intr after callback register */
	rte_intr_enable(intr_handle);

	return 0;

err_pf_setup:
	ice_res_pool_destroy(&pf->msix_pool);
err_msix_pool_init:
	rte_free(dev->data->mac_addrs);
err_init_mac:
	ice_sched_cleanup_all(hw);
	rte_free(hw->port_info);
	ice_shutdown_all_ctrlq(hw);

	return ret;
}
static int
ice_release_vsi(struct ice_vsi *vsi)
{
	struct ice_hw *hw;
	struct ice_vsi_ctx vsi_ctx;
	enum ice_status ret;

	if (!vsi)
		return 0;

	hw = ICE_VSI_TO_HW(vsi);

	ice_remove_all_mac_vlan_filters(vsi);

	memset(&vsi_ctx, 0, sizeof(vsi_ctx));

	vsi_ctx.vsi_num = vsi->vsi_id;
	vsi_ctx.info = vsi->info;
	ret = ice_free_vsi(hw, vsi->idx, &vsi_ctx, false, NULL);
	if (ret != ICE_SUCCESS) {
		PMD_INIT_LOG(ERR, "Failed to free vsi by aq, %u", vsi->vsi_id);
		rte_free(vsi);
		return -1;
	}

	rte_free(vsi);
	return 0;
}
static void
ice_vsi_disable_queues_intr(struct ice_vsi *vsi)
{
	struct rte_eth_dev *dev = vsi->adapter->eth_dev;
	struct rte_pci_device *pci_dev = ICE_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
	struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
	uint16_t msix_intr, i;

	/* disable interrupt and also clear all the exist config */
	for (i = 0; i < vsi->nb_qps; i++) {
		ICE_WRITE_REG(hw, QINT_TQCTL(vsi->base_queue + i), 0);
		ICE_WRITE_REG(hw, QINT_RQCTL(vsi->base_queue + i), 0);
	}

	if (rte_intr_allow_others(intr_handle))
		for (i = 0; i < vsi->nb_msix; i++) {
			msix_intr = vsi->msix_intr + i;
			ICE_WRITE_REG(hw, GLINT_DYN_CTL(msix_intr),
				      GLINT_DYN_CTL_WB_ON_ITR_M);
		}
	else
		ICE_WRITE_REG(hw, GLINT_DYN_CTL(0), GLINT_DYN_CTL_WB_ON_ITR_M);
}
static void
ice_dev_stop(struct rte_eth_dev *dev)
{
	struct rte_eth_dev_data *data = dev->data;
	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct ice_vsi *main_vsi = pf->main_vsi;
	struct rte_pci_device *pci_dev = ICE_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
	uint16_t i;

	/* avoid stopping again */
	if (pf->adapter_stopped)
		return;

	/* stop and clear all Rx queues */
	for (i = 0; i < data->nb_rx_queues; i++)
		ice_rx_queue_stop(dev, i);

	/* stop and clear all Tx queues */
	for (i = 0; i < data->nb_tx_queues; i++)
		ice_tx_queue_stop(dev, i);

	/* disable all queue interrupts */
	ice_vsi_disable_queues_intr(main_vsi);

	/* Clear all queues and release mbufs */
	ice_clear_queues(dev);

	/* Clean datapath event and queue/vec mapping */
	rte_intr_efd_disable(intr_handle);
	if (intr_handle->intr_vec) {
		rte_free(intr_handle->intr_vec);
		intr_handle->intr_vec = NULL;
	}

	pf->adapter_stopped = true;
}
static void
ice_dev_close(struct rte_eth_dev *dev)
{
	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	ice_dev_stop(dev);

	/* release all queue resource */
	ice_free_queues(dev);

	ice_res_pool_destroy(&pf->msix_pool);
	ice_release_vsi(pf->main_vsi);
	ice_sched_cleanup_all(hw);
	rte_free(hw->port_info);
	ice_shutdown_all_ctrlq(hw);
}
static int
ice_dev_uninit(struct rte_eth_dev *dev)
{
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;

	ice_dev_close(dev);

	dev->dev_ops = NULL;
	dev->rx_pkt_burst = NULL;
	dev->tx_pkt_burst = NULL;

	rte_free(dev->data->mac_addrs);
	dev->data->mac_addrs = NULL;

	/* disable uio intr before callback unregister */
	rte_intr_disable(intr_handle);

	/* unregister callback func from eal lib */
	rte_intr_callback_unregister(intr_handle,
				     ice_interrupt_handler, dev);

	return 0;
}
static int
ice_dev_configure(__rte_unused struct rte_eth_dev *dev)
{
	struct ice_adapter *ad =
		ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);

	/* Initialize to TRUE. If any of Rx queues doesn't meet the
	 * bulk allocation or vector Rx preconditions we will reset it.
	 */
	ad->rx_bulk_alloc_allowed = true;
	ad->tx_simple_allowed = true;

	return 0;
}
static int ice_init_rss(struct ice_pf *pf)
{
	struct ice_hw *hw = ICE_PF_TO_HW(pf);
	struct ice_vsi *vsi = pf->main_vsi;
	struct rte_eth_dev *dev = pf->adapter->eth_dev;
	struct rte_eth_rss_conf *rss_conf;
	struct ice_aqc_get_set_rss_keys key;
	uint16_t i, nb_q;
	int ret = 0;
	bool is_safe_mode = pf->adapter->is_safe_mode;

	rss_conf = &dev->data->dev_conf.rx_adv_conf.rss_conf;
	nb_q = dev->data->nb_rx_queues;
	vsi->rss_key_size = ICE_AQC_GET_SET_RSS_KEY_DATA_RSS_KEY_SIZE;
	vsi->rss_lut_size = hw->func_caps.common_cap.rss_table_size;

	if (is_safe_mode) {
		PMD_DRV_LOG(WARNING, "RSS is not supported in safe mode\n");
		return 0;
	}

	if (!vsi->rss_key)
		vsi->rss_key = rte_zmalloc(NULL,
					   vsi->rss_key_size, 0);
	if (!vsi->rss_lut)
		vsi->rss_lut = rte_zmalloc(NULL,
					   vsi->rss_lut_size, 0);

	/* configure RSS key */
	if (!rss_conf->rss_key) {
		/* Calculate the default hash key */
		for (i = 0; i < vsi->rss_key_size; i++)
			vsi->rss_key[i] = (uint8_t)rte_rand();
	} else {
		rte_memcpy(vsi->rss_key, rss_conf->rss_key,
			   RTE_MIN(rss_conf->rss_key_len,
				   vsi->rss_key_size));
	}
	rte_memcpy(key.standard_rss_key, vsi->rss_key, vsi->rss_key_size);
	ret = ice_aq_set_rss_key(hw, vsi->idx, &key);
	if (ret)
		return -EINVAL;

	/* init RSS LUT table */
	for (i = 0; i < vsi->rss_lut_size; i++)
		vsi->rss_lut[i] = i % nb_q;

	ret = ice_aq_set_rss_lut(hw, vsi->idx,
				 ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_PF,
				 vsi->rss_lut, vsi->rss_lut_size);
	if (ret)
		return -EINVAL;

	/* configure RSS for IPv4 with input set IPv4 src/dst */
	ret = ice_add_rss_cfg(hw, vsi->idx, ICE_FLOW_HASH_IPV4,
			      ICE_FLOW_SEG_HDR_IPV4);
	if (ret)
		PMD_DRV_LOG(ERR, "%s IPV4 rss flow fail %d", __func__, ret);

	/* configure RSS for IPv6 with input set IPv6 src/dst */
	ret = ice_add_rss_cfg(hw, vsi->idx, ICE_FLOW_HASH_IPV6,
			      ICE_FLOW_SEG_HDR_IPV6);
	if (ret)
		PMD_DRV_LOG(ERR, "%s IPV6 rss flow fail %d", __func__, ret);

	/* configure RSS for tcp6 with input set IPv6 src/dst, TCP src/dst */
	ret = ice_add_rss_cfg(hw, vsi->idx, ICE_HASH_TCP_IPV6,
			      ICE_FLOW_SEG_HDR_TCP | ICE_FLOW_SEG_HDR_IPV6);
	if (ret)
		PMD_DRV_LOG(ERR, "%s TCP_IPV6 rss flow fail %d", __func__, ret);

	/* configure RSS for udp6 with input set IPv6 src/dst, UDP src/dst */
	ret = ice_add_rss_cfg(hw, vsi->idx, ICE_HASH_UDP_IPV6,
			      ICE_FLOW_SEG_HDR_UDP | ICE_FLOW_SEG_HDR_IPV6);
	if (ret)
		PMD_DRV_LOG(ERR, "%s UDP_IPV6 rss flow fail %d", __func__, ret);

	/* configure RSS for sctp6 with input set IPv6 src/dst */
	ret = ice_add_rss_cfg(hw, vsi->idx, ICE_FLOW_HASH_IPV6,
			      ICE_FLOW_SEG_HDR_SCTP | ICE_FLOW_SEG_HDR_IPV6);
	if (ret)
		PMD_DRV_LOG(ERR, "%s SCTP_IPV6 rss flow fail %d",
			    __func__, ret);

	/* configure RSS for tcp4 with input set IP src/dst, TCP src/dst */
	ret = ice_add_rss_cfg(hw, vsi->idx, ICE_HASH_TCP_IPV4,
			      ICE_FLOW_SEG_HDR_TCP | ICE_FLOW_SEG_HDR_IPV4);
	if (ret)
		PMD_DRV_LOG(ERR, "%s TCP_IPV4 rss flow fail %d", __func__, ret);

	/* configure RSS for udp4 with input set IP src/dst, UDP src/dst */
	ret = ice_add_rss_cfg(hw, vsi->idx, ICE_HASH_UDP_IPV4,
			      ICE_FLOW_SEG_HDR_UDP | ICE_FLOW_SEG_HDR_IPV4);
	if (ret)
		PMD_DRV_LOG(ERR, "%s UDP_IPV4 rss flow fail %d", __func__, ret);

	/* configure RSS for sctp4 with input set IP src/dst */
	ret = ice_add_rss_cfg(hw, vsi->idx, ICE_FLOW_HASH_IPV4,
			      ICE_FLOW_SEG_HDR_SCTP | ICE_FLOW_SEG_HDR_IPV4);
	if (ret)
		PMD_DRV_LOG(ERR, "%s SCTP_IPV4 rss flow fail %d",
			    __func__, ret);

	return 0;
}
static void
__vsi_queues_bind_intr(struct ice_vsi *vsi, uint16_t msix_vect,
		       int base_queue, int nb_queue)
{
	struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
	uint32_t val, val_tx;
	int i;

	for (i = 0; i < nb_queue; i++) {
		val = (msix_vect & QINT_RQCTL_MSIX_INDX_M) |
		      (0 << QINT_RQCTL_ITR_INDX_S) | QINT_RQCTL_CAUSE_ENA_M;
		val_tx = (msix_vect & QINT_TQCTL_MSIX_INDX_M) |
			 (0 << QINT_TQCTL_ITR_INDX_S) | QINT_TQCTL_CAUSE_ENA_M;

		PMD_DRV_LOG(INFO, "queue %d is binding to vect %d",
			    base_queue + i, msix_vect);
		/* set ITR0 value */
		ICE_WRITE_REG(hw, GLINT_ITR(0, msix_vect), 0x10);
		ICE_WRITE_REG(hw, QINT_RQCTL(base_queue + i), val);
		ICE_WRITE_REG(hw, QINT_TQCTL(base_queue + i), val_tx);
	}
}
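/* Bind the used Rx/Tx queues of the VSI to MSI-X vectors and record the mapping in intr_handle->intr_vec */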
static void
ice_vsi_queues_bind_intr(struct ice_vsi *vsi)
{
	struct rte_eth_dev *dev = vsi->adapter->eth_dev;
	struct rte_pci_device *pci_dev = ICE_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
	struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
	uint16_t msix_vect = vsi->msix_intr;
	uint16_t nb_msix = RTE_MIN(vsi->nb_msix, intr_handle->nb_efd);
	uint16_t queue_idx = 0;
	int record = 0;
	int i;

	/* clear Rx/Tx queue interrupt */
	for (i = 0; i < vsi->nb_used_qps; i++) {
		ICE_WRITE_REG(hw, QINT_TQCTL(vsi->base_queue + i), 0);
		ICE_WRITE_REG(hw, QINT_RQCTL(vsi->base_queue + i), 0);
	}

	/* PF bind interrupt */
	if (rte_intr_dp_is_en(intr_handle)) {
		queue_idx = 0;
		record = 1;
	}

	for (i = 0; i < vsi->nb_used_qps; i++) {
		if (nb_msix <= 1) {
			if (!rte_intr_allow_others(intr_handle))
				msix_vect = ICE_MISC_VEC_ID;

			/* uio mapping all queue to one msix_vect */
			__vsi_queues_bind_intr(vsi, msix_vect,
					       vsi->base_queue + i,
					       vsi->nb_used_qps - i);

			for (; !!record && i < vsi->nb_used_qps; i++)
				intr_handle->intr_vec[queue_idx + i] =
					msix_vect;
			break;
		}

		/* vfio 1:1 queue/msix_vect mapping */
		__vsi_queues_bind_intr(vsi, msix_vect,
				       vsi->base_queue + i, 1);

		if (!!record)
			intr_handle->intr_vec[queue_idx + i] = msix_vect;

		msix_vect++;
		nb_msix--;
	}
}
static void
ice_vsi_enable_queues_intr(struct ice_vsi *vsi)
{
	struct rte_eth_dev *dev = vsi->adapter->eth_dev;
	struct rte_pci_device *pci_dev = ICE_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
	struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
	uint16_t msix_intr, i;

	if (rte_intr_allow_others(intr_handle))
		for (i = 0; i < vsi->nb_used_qps; i++) {
			msix_intr = vsi->msix_intr + i;
			ICE_WRITE_REG(hw, GLINT_DYN_CTL(msix_intr),
				      GLINT_DYN_CTL_INTENA_M |
				      GLINT_DYN_CTL_CLEARPBA_M |
				      GLINT_DYN_CTL_ITR_INDX_M |
				      GLINT_DYN_CTL_WB_ON_ITR_M);
		}
	else
		ICE_WRITE_REG(hw, GLINT_DYN_CTL(0),
			      GLINT_DYN_CTL_INTENA_M |
			      GLINT_DYN_CTL_CLEARPBA_M |
			      GLINT_DYN_CTL_ITR_INDX_M |
			      GLINT_DYN_CTL_WB_ON_ITR_M);
}
static int
ice_rxq_intr_setup(struct rte_eth_dev *dev)
{
	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct rte_pci_device *pci_dev = ICE_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
	struct ice_vsi *vsi = pf->main_vsi;
	uint32_t intr_vector = 0;

	rte_intr_disable(intr_handle);

	/* check and configure queue intr-vector mapping */
	if ((rte_intr_cap_multiple(intr_handle) ||
	     !RTE_ETH_DEV_SRIOV(dev).active) &&
	    dev->data->dev_conf.intr_conf.rxq != 0) {
		intr_vector = dev->data->nb_rx_queues;
		if (intr_vector > ICE_MAX_INTR_QUEUE_NUM) {
			PMD_DRV_LOG(ERR, "At most %d intr queues supported",
				    ICE_MAX_INTR_QUEUE_NUM);
			return -ENOTSUP;
		}
		if (rte_intr_efd_enable(intr_handle, intr_vector))
			return -1;
	}

	if (rte_intr_dp_is_en(intr_handle) && !intr_handle->intr_vec) {
		intr_handle->intr_vec =
			rte_zmalloc(NULL, dev->data->nb_rx_queues * sizeof(int),
				    0);
		if (!intr_handle->intr_vec) {
			PMD_DRV_LOG(ERR,
				    "Failed to allocate %d rx_queues intr_vec",
				    dev->data->nb_rx_queues);
			return -ENOMEM;
		}
	}

	/* Map queues with MSIX interrupt */
	vsi->nb_used_qps = dev->data->nb_rx_queues;
	ice_vsi_queues_bind_intr(vsi);

	/* Enable interrupts for all the queues */
	ice_vsi_enable_queues_intr(vsi);

	rte_intr_enable(intr_handle);

	return 0;
}
static int
ice_dev_start(struct rte_eth_dev *dev)
{
	struct rte_eth_dev_data *data = dev->data;
	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct ice_vsi *vsi = pf->main_vsi;
	uint16_t nb_rxq = 0;
	uint16_t nb_txq, i;
	int mask, ret;

	/* program Tx queues' context in hardware */
	for (nb_txq = 0; nb_txq < data->nb_tx_queues; nb_txq++) {
		ret = ice_tx_queue_start(dev, nb_txq);
		if (ret) {
			PMD_DRV_LOG(ERR, "fail to start Tx queue %u", nb_txq);
			goto tx_err;
		}
	}

	/* program Rx queues' context in hardware*/
	for (nb_rxq = 0; nb_rxq < data->nb_rx_queues; nb_rxq++) {
		ret = ice_rx_queue_start(dev, nb_rxq);
		if (ret) {
			PMD_DRV_LOG(ERR, "fail to start Rx queue %u", nb_rxq);
			goto rx_err;
		}
	}

	ret = ice_init_rss(pf);
	if (ret) {
		PMD_DRV_LOG(ERR, "Failed to enable rss for PF");
		goto rx_err;
	}

	ice_set_rx_function(dev);
	ice_set_tx_function(dev);

	mask = ETH_VLAN_STRIP_MASK | ETH_VLAN_FILTER_MASK |
			ETH_VLAN_EXTEND_MASK;
	ret = ice_vlan_offload_set(dev, mask);
	if (ret) {
		PMD_INIT_LOG(ERR, "Unable to set VLAN offload");
		goto rx_err;
	}

	/* enable Rx interrupt and mapping Rx queue to interrupt vector */
	if (ice_rxq_intr_setup(dev))
		return -EIO;

	/* Enable receiving broadcast packets and transmitting packets */
	ret = ice_set_vsi_promisc(hw, vsi->idx,
				  ICE_PROMISC_BCAST_RX | ICE_PROMISC_BCAST_TX |
				  ICE_PROMISC_UCAST_TX | ICE_PROMISC_MCAST_TX,
				  0);
	if (ret != ICE_SUCCESS)
		PMD_DRV_LOG(INFO, "fail to set vsi broadcast");

	ret = ice_aq_set_event_mask(hw, hw->port_info->lport,
				    ((u16)(ICE_AQ_LINK_EVENT_LINK_FAULT |
				     ICE_AQ_LINK_EVENT_PHY_TEMP_ALARM |
				     ICE_AQ_LINK_EVENT_EXCESSIVE_ERRORS |
				     ICE_AQ_LINK_EVENT_SIGNAL_DETECT |
				     ICE_AQ_LINK_EVENT_AN_COMPLETED |
				     ICE_AQ_LINK_EVENT_PORT_TX_SUSPENDED)),
				    NULL);
	if (ret != ICE_SUCCESS)
		PMD_DRV_LOG(WARNING, "Fail to set phy mask");

	/* Call get_link_info aq command to enable/disable LSE */
	ice_link_update(dev, 0);

	pf->adapter_stopped = false;

	return 0;

	/* stop the started queues if failed to start all queues */
rx_err:
	for (i = 0; i < nb_rxq; i++)
		ice_rx_queue_stop(dev, i);
tx_err:
	for (i = 0; i < nb_txq; i++)
		ice_tx_queue_stop(dev, i);

	return -EIO;
}
static int
ice_dev_reset(struct rte_eth_dev *dev)
{
	int ret;

	if (dev->data->sriov.active)
		return -ENOTSUP;

	ret = ice_dev_uninit(dev);
	if (ret) {
		PMD_INIT_LOG(ERR, "failed to uninit device, status = %d", ret);
		return -ENXIO;
	}

	ret = ice_dev_init(dev);
	if (ret) {
		PMD_INIT_LOG(ERR, "failed to init device, status = %d", ret);
		return -ENXIO;
	}

	return 0;
}
static void
ice_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
{
	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct ice_vsi *vsi = pf->main_vsi;
	struct rte_pci_device *pci_dev = RTE_DEV_TO_PCI(dev->device);
	bool is_safe_mode = pf->adapter->is_safe_mode;
	u64 phy_type_low;
	u64 phy_type_high;

	dev_info->min_rx_bufsize = ICE_BUF_SIZE_MIN;
	dev_info->max_rx_pktlen = ICE_FRAME_SIZE_MAX;
	dev_info->max_rx_queues = vsi->nb_qps;
	dev_info->max_tx_queues = vsi->nb_qps;
	dev_info->max_mac_addrs = vsi->max_macaddrs;
	dev_info->max_vfs = pci_dev->max_vfs;

	dev_info->rx_offload_capa =
		DEV_RX_OFFLOAD_VLAN_STRIP |
		DEV_RX_OFFLOAD_JUMBO_FRAME |
		DEV_RX_OFFLOAD_KEEP_CRC |
		DEV_RX_OFFLOAD_SCATTER |
		DEV_RX_OFFLOAD_VLAN_FILTER;
	dev_info->tx_offload_capa =
		DEV_TX_OFFLOAD_VLAN_INSERT |
		DEV_TX_OFFLOAD_TCP_TSO |
		DEV_TX_OFFLOAD_MULTI_SEGS |
		DEV_TX_OFFLOAD_MBUF_FAST_FREE;
	dev_info->flow_type_rss_offloads = 0;

	if (!is_safe_mode) {
		dev_info->rx_offload_capa |=
			DEV_RX_OFFLOAD_IPV4_CKSUM |
			DEV_RX_OFFLOAD_UDP_CKSUM |
			DEV_RX_OFFLOAD_TCP_CKSUM |
			DEV_RX_OFFLOAD_QINQ_STRIP |
			DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
			DEV_RX_OFFLOAD_VLAN_EXTEND;
		dev_info->tx_offload_capa |=
			DEV_TX_OFFLOAD_QINQ_INSERT |
			DEV_TX_OFFLOAD_IPV4_CKSUM |
			DEV_TX_OFFLOAD_UDP_CKSUM |
			DEV_TX_OFFLOAD_TCP_CKSUM |
			DEV_TX_OFFLOAD_SCTP_CKSUM |
			DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM;
		dev_info->flow_type_rss_offloads |= ICE_RSS_OFFLOAD_ALL;
	}

	dev_info->rx_queue_offload_capa = 0;
	dev_info->tx_queue_offload_capa = 0;

	dev_info->reta_size = hw->func_caps.common_cap.rss_table_size;
	dev_info->hash_key_size = (VSIQF_HKEY_MAX_INDEX + 1) * sizeof(uint32_t);

	dev_info->default_rxconf = (struct rte_eth_rxconf) {
		.rx_thresh = {
			.pthresh = ICE_DEFAULT_RX_PTHRESH,
			.hthresh = ICE_DEFAULT_RX_HTHRESH,
			.wthresh = ICE_DEFAULT_RX_WTHRESH,
		},
		.rx_free_thresh = ICE_DEFAULT_RX_FREE_THRESH,
		.rx_drop_en = 0,
		.offloads = 0,
	};

	dev_info->default_txconf = (struct rte_eth_txconf) {
		.tx_thresh = {
			.pthresh = ICE_DEFAULT_TX_PTHRESH,
			.hthresh = ICE_DEFAULT_TX_HTHRESH,
			.wthresh = ICE_DEFAULT_TX_WTHRESH,
		},
		.tx_free_thresh = ICE_DEFAULT_TX_FREE_THRESH,
		.tx_rs_thresh = ICE_DEFAULT_TX_RSBIT_THRESH,
		.offloads = 0,
	};

	dev_info->rx_desc_lim = (struct rte_eth_desc_lim) {
		.nb_max = ICE_MAX_RING_DESC,
		.nb_min = ICE_MIN_RING_DESC,
		.nb_align = ICE_ALIGN_RING_DESC,
	};

	dev_info->tx_desc_lim = (struct rte_eth_desc_lim) {
		.nb_max = ICE_MAX_RING_DESC,
		.nb_min = ICE_MIN_RING_DESC,
		.nb_align = ICE_ALIGN_RING_DESC,
	};

	dev_info->speed_capa = ETH_LINK_SPEED_10M |
			       ETH_LINK_SPEED_100M |
			       ETH_LINK_SPEED_1G |
			       ETH_LINK_SPEED_2_5G |
			       ETH_LINK_SPEED_5G |
			       ETH_LINK_SPEED_10G |
			       ETH_LINK_SPEED_20G |
			       ETH_LINK_SPEED_25G |
			       ETH_LINK_SPEED_40G;

	phy_type_low = hw->port_info->phy.phy_type_low;
	phy_type_high = hw->port_info->phy.phy_type_high;

	if (ICE_PHY_TYPE_SUPPORT_50G(phy_type_low))
		dev_info->speed_capa |= ETH_LINK_SPEED_50G;

	if (ICE_PHY_TYPE_SUPPORT_100G_LOW(phy_type_low) ||
	    ICE_PHY_TYPE_SUPPORT_100G_HIGH(phy_type_high))
		dev_info->speed_capa |= ETH_LINK_SPEED_100G;

	dev_info->nb_rx_queues = dev->data->nb_rx_queues;
	dev_info->nb_tx_queues = dev->data->nb_tx_queues;

	dev_info->default_rxportconf.burst_size = ICE_RX_MAX_BURST;
	dev_info->default_txportconf.burst_size = ICE_TX_MAX_BURST;
	dev_info->default_rxportconf.nb_queues = 1;
	dev_info->default_txportconf.nb_queues = 1;
	dev_info->default_rxportconf.ring_size = ICE_BUF_SIZE_MIN;
	dev_info->default_txportconf.ring_size = ICE_BUF_SIZE_MIN;
}
static inline int
ice_atomic_read_link_status(struct rte_eth_dev *dev,
			    struct rte_eth_link *link)
{
	struct rte_eth_link *dst = link;
	struct rte_eth_link *src = &dev->data->dev_link;

	if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
				*(uint64_t *)src) == 0)
		return -1;

	return 0;
}

static inline int
ice_atomic_write_link_status(struct rte_eth_dev *dev,
			     struct rte_eth_link *link)
{
	struct rte_eth_link *dst = &dev->data->dev_link;
	struct rte_eth_link *src = link;

	if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
				*(uint64_t *)src) == 0)
		return -1;

	return 0;
}
static int
ice_link_update(struct rte_eth_dev *dev, int wait_to_complete)
{
#define CHECK_INTERVAL 100  /* 100ms */
#define MAX_REPEAT_TIME 10  /* 1s (10 * 100ms) in total */
	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct ice_link_status link_status;
	struct rte_eth_link link, old;
	int status;
	unsigned int rep_cnt = MAX_REPEAT_TIME;
	bool enable_lse = dev->data->dev_conf.intr_conf.lsc ? true : false;

	memset(&link, 0, sizeof(link));
	memset(&old, 0, sizeof(old));
	memset(&link_status, 0, sizeof(link_status));
	ice_atomic_read_link_status(dev, &old);

	do {
		/* Get link status information from hardware */
		status = ice_aq_get_link_info(hw->port_info, enable_lse,
					      &link_status, NULL);
		if (status != ICE_SUCCESS) {
			link.link_speed = ETH_SPEED_NUM_100M;
			link.link_duplex = ETH_LINK_FULL_DUPLEX;
			PMD_DRV_LOG(ERR, "Failed to get link info");
			goto out;
		}

		link.link_status = link_status.link_info & ICE_AQ_LINK_UP;
		if (!wait_to_complete || link.link_status)
			break;

		rte_delay_ms(CHECK_INTERVAL);
	} while (--rep_cnt);

	if (!link.link_status)
		goto out;

	/* Full-duplex operation at all supported speeds */
	link.link_duplex = ETH_LINK_FULL_DUPLEX;

	/* Parse the link status */
	switch (link_status.link_speed) {
	case ICE_AQ_LINK_SPEED_10MB:
		link.link_speed = ETH_SPEED_NUM_10M;
		break;
	case ICE_AQ_LINK_SPEED_100MB:
		link.link_speed = ETH_SPEED_NUM_100M;
		break;
	case ICE_AQ_LINK_SPEED_1000MB:
		link.link_speed = ETH_SPEED_NUM_1G;
		break;
	case ICE_AQ_LINK_SPEED_2500MB:
		link.link_speed = ETH_SPEED_NUM_2_5G;
		break;
	case ICE_AQ_LINK_SPEED_5GB:
		link.link_speed = ETH_SPEED_NUM_5G;
		break;
	case ICE_AQ_LINK_SPEED_10GB:
		link.link_speed = ETH_SPEED_NUM_10G;
		break;
	case ICE_AQ_LINK_SPEED_20GB:
		link.link_speed = ETH_SPEED_NUM_20G;
		break;
	case ICE_AQ_LINK_SPEED_25GB:
		link.link_speed = ETH_SPEED_NUM_25G;
		break;
	case ICE_AQ_LINK_SPEED_40GB:
		link.link_speed = ETH_SPEED_NUM_40G;
		break;
	case ICE_AQ_LINK_SPEED_50GB:
		link.link_speed = ETH_SPEED_NUM_50G;
		break;
	case ICE_AQ_LINK_SPEED_100GB:
		link.link_speed = ETH_SPEED_NUM_100G;
		break;
	case ICE_AQ_LINK_SPEED_UNKNOWN:
	default:
		PMD_DRV_LOG(ERR, "Unknown link speed");
		link.link_speed = ETH_SPEED_NUM_NONE;
		break;
	}

	link.link_autoneg = !(dev->data->dev_conf.link_speeds &
			      ETH_LINK_SPEED_FIXED);

out:
	ice_atomic_write_link_status(dev, &link);
	if (link.link_status == old.link_status)
		return -1;

	return 0;
}
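
/*
 * Set a new MTU. The MTU must be at least ETHER_MIN_MTU and the resulting
 * frame size must not exceed ICE_FRAME_SIZE_MAX; the port must be stopped
 * first. The jumbo-frame Rx offload flag is toggled to match the new size.
 */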
static int
ice_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
{
	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct rte_eth_dev_data *dev_data = pf->dev_data;
	uint32_t frame_size = mtu + ICE_ETH_OVERHEAD;

	/* check if mtu is within the allowed range */
	if (mtu < ETHER_MIN_MTU || frame_size > ICE_FRAME_SIZE_MAX)
		return -EINVAL;

	/* mtu setting is forbidden if the port is started */
	if (dev_data->dev_started) {
		PMD_DRV_LOG(ERR,
			    "port %d must be stopped before configuration",
			    dev_data->port_id);
		return -EBUSY;
	}

	if (frame_size > ETHER_MAX_LEN)
		dev_data->dev_conf.rxmode.offloads |=
			DEV_RX_OFFLOAD_JUMBO_FRAME;
	else
		dev_data->dev_conf.rxmode.offloads &=
			~DEV_RX_OFFLOAD_JUMBO_FRAME;

	dev_data->dev_conf.rxmode.max_rx_pkt_len = frame_size;

	return 0;
}
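
/*
 * Replace the default MAC address: drop the switch filter for the current
 * address, install one for the new address, and program the new address as
 * the managed (locally administered) MAC through the admin queue.
 */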
static int ice_macaddr_set(struct rte_eth_dev *dev,
			   struct ether_addr *mac_addr)
{
	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct ice_vsi *vsi = pf->main_vsi;
	struct ice_mac_filter *f;
	uint8_t flags = 0;
	int ret;

	if (!is_valid_assigned_ether_addr(mac_addr)) {
		PMD_DRV_LOG(ERR, "Tried to set invalid MAC address.");
		return -EINVAL;
	}

	TAILQ_FOREACH(f, &vsi->mac_list, next) {
		if (is_same_ether_addr(&pf->dev_addr, &f->mac_info.mac_addr))
			break;
	}

	if (!f) {
		PMD_DRV_LOG(ERR, "Failed to find filter for default mac");
		return -EIO;
	}

	ret = ice_remove_mac_filter(vsi, &f->mac_info.mac_addr);
	if (ret != ICE_SUCCESS) {
		PMD_DRV_LOG(ERR, "Failed to delete mac filter");
		return -EIO;
	}
	ret = ice_add_mac_filter(vsi, mac_addr);
	if (ret != ICE_SUCCESS) {
		PMD_DRV_LOG(ERR, "Failed to add mac filter");
		return -EIO;
	}
	memcpy(&pf->dev_addr, mac_addr, ETH_ADDR_LEN);

	flags = ICE_AQC_MAN_MAC_UPDATE_LAA_WOL;
	ret = ice_aq_manage_mac_write(hw, mac_addr->addr_bytes, flags, NULL);
	if (ret != ICE_SUCCESS)
		PMD_DRV_LOG(ERR, "Failed to set manage mac");

	return 0;
}

/* Add a MAC address, and update filters */
static int
ice_macaddr_add(struct rte_eth_dev *dev,
		struct ether_addr *mac_addr,
		__rte_unused uint32_t index,
		__rte_unused uint32_t pool)
{
	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct ice_vsi *vsi = pf->main_vsi;
	int ret;

	ret = ice_add_mac_filter(vsi, mac_addr);
	if (ret != ICE_SUCCESS) {
		PMD_DRV_LOG(ERR, "Failed to add MAC filter");
		return -EINVAL;
	}

	return ICE_SUCCESS;
}

/* Remove a MAC address, and update filters */
static void
ice_macaddr_remove(struct rte_eth_dev *dev, uint32_t index)
{
	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct ice_vsi *vsi = pf->main_vsi;
	struct rte_eth_dev_data *data = dev->data;
	struct ether_addr *macaddr;
	int ret;

	macaddr = &data->mac_addrs[index];
	ret = ice_remove_mac_filter(vsi, macaddr);
	if (ret) {
		PMD_DRV_LOG(ERR, "Failed to remove MAC filter");
		return;
	}
}
static int
ice_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
{
	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct ice_vsi *vsi = pf->main_vsi;
	int ret;

	PMD_INIT_FUNC_TRACE();

	if (on) {
		ret = ice_add_vlan_filter(vsi, vlan_id);
		if (ret < 0) {
			PMD_DRV_LOG(ERR, "Failed to add vlan filter");
			return -EINVAL;
		}
	} else {
		ret = ice_remove_vlan_filter(vsi, vlan_id);
		if (ret < 0) {
			PMD_DRV_LOG(ERR, "Failed to remove vlan filter");
			return -EINVAL;
		}
	}

	return 0;
}

/* Configure vlan filter on or off */
static int
ice_vsi_config_vlan_filter(struct ice_vsi *vsi, bool on)
{
	struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
	struct ice_vsi_ctx ctxt;
	uint8_t sec_flags, sw_flags2;
	int ret = 0;

	sec_flags = ICE_AQ_VSI_SEC_TX_VLAN_PRUNE_ENA <<
		    ICE_AQ_VSI_SEC_TX_PRUNE_ENA_S;
	sw_flags2 = ICE_AQ_VSI_SW_FLAG_RX_VLAN_PRUNE_ENA;

	if (on) {
		vsi->info.sec_flags |= sec_flags;
		vsi->info.sw_flags2 |= sw_flags2;
	} else {
		vsi->info.sec_flags &= ~sec_flags;
		vsi->info.sw_flags2 &= ~sw_flags2;
	}
	vsi->info.sw_id = hw->port_info->sw_id;
	(void)rte_memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
	ctxt.info.valid_sections =
		rte_cpu_to_le_16(ICE_AQ_VSI_PROP_SW_VALID |
				 ICE_AQ_VSI_PROP_SECURITY_VALID);
	ctxt.vsi_num = vsi->vsi_id;

	ret = ice_update_vsi(hw, vsi->idx, &ctxt, NULL);
	if (ret) {
		PMD_DRV_LOG(INFO, "Update VSI failed to %s vlan rx pruning",
			    on ? "enable" : "disable");
		return -EINVAL;
	}

	vsi->info.valid_sections |=
		rte_cpu_to_le_16(ICE_AQ_VSI_PROP_SW_VALID |
				 ICE_AQ_VSI_PROP_SECURITY_VALID);

	/* To be consistent with other drivers, allow untagged packets
	 * when the vlan filter is on.
	 */
	if (on)
		ret = ice_add_vlan_filter(vsi, 0);
	else
		ret = ice_remove_vlan_filter(vsi, 0);

	return 0;
}
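
/*
 * Enable or disable VLAN stripping on a VSI by updating the
 * ICE_AQ_VSI_VLAN_EMOD_* bits in the VSI context; returns early if the
 * requested mode is already programmed.
 */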
static int
ice_vsi_config_vlan_stripping(struct ice_vsi *vsi, bool on)
{
	struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
	struct ice_vsi_ctx ctxt;
	uint8_t vlan_flags;
	int ret = 0;

	/* Check if it has been already on or off */
	if (vsi->info.valid_sections &
	    rte_cpu_to_le_16(ICE_AQ_VSI_PROP_VLAN_VALID)) {
		if (on) {
			if ((vsi->info.vlan_flags &
			     ICE_AQ_VSI_VLAN_EMOD_M) ==
			    ICE_AQ_VSI_VLAN_EMOD_STR_BOTH)
				return 0; /* already on */
		} else {
			if ((vsi->info.vlan_flags &
			     ICE_AQ_VSI_VLAN_EMOD_M) ==
			    ICE_AQ_VSI_VLAN_EMOD_NOTHING)
				return 0; /* already off */
		}
	}

	if (on)
		vlan_flags = ICE_AQ_VSI_VLAN_EMOD_STR_BOTH;
	else
		vlan_flags = ICE_AQ_VSI_VLAN_EMOD_NOTHING;
	vsi->info.vlan_flags &= ~(ICE_AQ_VSI_VLAN_EMOD_M);
	vsi->info.vlan_flags |= vlan_flags;
	(void)rte_memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
	ctxt.info.valid_sections =
		rte_cpu_to_le_16(ICE_AQ_VSI_PROP_VLAN_VALID);
	ctxt.vsi_num = vsi->vsi_id;
	ret = ice_update_vsi(hw, vsi->idx, &ctxt, NULL);
	if (ret) {
		PMD_DRV_LOG(INFO, "Update VSI failed to %s vlan stripping",
			    on ? "enable" : "disable");
		return -EINVAL;
	}

	vsi->info.valid_sections |=
		rte_cpu_to_le_16(ICE_AQ_VSI_PROP_VLAN_VALID);

	return ret;
}
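
/*
 * Apply the VLAN offload configuration from dev_conf.rxmode.offloads for
 * whichever of the filter, strip and extend (QinQ) masks are set.
 */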
static int
ice_vlan_offload_set(struct rte_eth_dev *dev, int mask)
{
	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct ice_vsi *vsi = pf->main_vsi;
	struct rte_eth_rxmode *rxmode;

	rxmode = &dev->data->dev_conf.rxmode;
	if (mask & ETH_VLAN_FILTER_MASK) {
		if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_FILTER)
			ice_vsi_config_vlan_filter(vsi, TRUE);
		else
			ice_vsi_config_vlan_filter(vsi, FALSE);
	}

	if (mask & ETH_VLAN_STRIP_MASK) {
		if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
			ice_vsi_config_vlan_stripping(vsi, TRUE);
		else
			ice_vsi_config_vlan_stripping(vsi, FALSE);
	}

	if (mask & ETH_VLAN_EXTEND_MASK) {
		if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_EXTEND)
			ice_vsi_config_double_vlan(vsi, TRUE);
		else
			ice_vsi_config_double_vlan(vsi, FALSE);
	}

	return 0;
}
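
/*
 * Program the outer/inner VLAN TPID (ether type) into the relevant
 * GL_SWT_L2TAGCTRL register. Which register is written depends on whether
 * QinQ (VLAN extend) is enabled.
 */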
static int
ice_vlan_tpid_set(struct rte_eth_dev *dev,
		  enum rte_vlan_type vlan_type,
		  uint16_t tpid)
{
	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint64_t reg_r = 0, reg_w = 0;
	uint16_t reg_id = 0;
	int qinq = dev->data->dev_conf.rxmode.offloads &
		   DEV_RX_OFFLOAD_VLAN_EXTEND;

	switch (vlan_type) {
	case ETH_VLAN_TYPE_OUTER:
		if (qinq)
			reg_id = 3;
		else
			reg_id = 5;
		break;
	case ETH_VLAN_TYPE_INNER:
		if (qinq) {
			reg_id = 5;
		} else {
			PMD_DRV_LOG(ERR,
				    "Unsupported vlan type in single vlan.");
			return -EINVAL;
		}
		break;
	default:
		PMD_DRV_LOG(ERR, "Unsupported vlan type %d", vlan_type);
		return -EINVAL;
	}
	reg_r = ICE_READ_REG(hw, GL_SWT_L2TAGCTRL(reg_id));
	PMD_DRV_LOG(DEBUG, "Debug read from ICE GL_SWT_L2TAGCTRL[%d]: "
		    "0x%08"PRIx64"", reg_id, reg_r);

	reg_w = reg_r & (~(GL_SWT_L2TAGCTRL_ETHERTYPE_M));
	reg_w |= ((uint64_t)tpid << GL_SWT_L2TAGCTRL_ETHERTYPE_S);
	if (reg_r == reg_w) {
		PMD_DRV_LOG(DEBUG, "No need to write");
		return 0;
	}

	ICE_WRITE_REG(hw, GL_SWT_L2TAGCTRL(reg_id), reg_w);
	PMD_DRV_LOG(DEBUG, "Debug write 0x%08"PRIx64" to "
		    "ICE GL_SWT_L2TAGCTRL[%d]", reg_w, reg_id);

	return 0;
}
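
/*
 * Read the RSS lookup table, either through the admin queue when the PF
 * supports it or directly from the PFQF_HLUT registers.
 */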
static int
ice_get_rss_lut(struct ice_vsi *vsi, uint8_t *lut, uint16_t lut_size)
{
	struct ice_pf *pf = ICE_VSI_TO_PF(vsi);
	struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
	int ret;

	if (!lut)
		return -EINVAL;

	if (pf->flags & ICE_FLAG_RSS_AQ_CAPABLE) {
		ret = ice_aq_get_rss_lut(hw, vsi->idx, TRUE,
					 lut, lut_size);
		if (ret) {
			PMD_DRV_LOG(ERR, "Failed to get RSS lookup table");
			return -EINVAL;
		}
	} else {
		uint64_t *lut_dw = (uint64_t *)lut;
		uint16_t i, lut_size_dw = lut_size / 4;

		for (i = 0; i < lut_size_dw; i++)
			lut_dw[i] = ICE_READ_REG(hw, PFQF_HLUT(i));
	}

	return 0;
}

static int
ice_set_rss_lut(struct ice_vsi *vsi, uint8_t *lut, uint16_t lut_size)
{
	struct ice_pf *pf;
	struct ice_hw *hw;
	int ret;

	if (!vsi || !lut)
		return -EINVAL;

	pf = ICE_VSI_TO_PF(vsi);
	hw = ICE_VSI_TO_HW(vsi);

	if (pf->flags & ICE_FLAG_RSS_AQ_CAPABLE) {
		ret = ice_aq_set_rss_lut(hw, vsi->idx, TRUE,
					 lut, lut_size);
		if (ret) {
			PMD_DRV_LOG(ERR, "Failed to set RSS lookup table");
			return -EINVAL;
		}
	} else {
		uint64_t *lut_dw = (uint64_t *)lut;
		uint16_t i, lut_size_dw = lut_size / 4;

		for (i = 0; i < lut_size_dw; i++)
			ICE_WRITE_REG(hw, PFQF_HLUT(i), lut_dw[i]);

		ice_flush(hw);
	}

	return 0;
}
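
/*
 * Update the RSS redirection table: read the current LUT, overwrite the
 * entries selected by reta_conf[].mask, then write the LUT back.
 */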
static int
ice_rss_reta_update(struct rte_eth_dev *dev,
		    struct rte_eth_rss_reta_entry64 *reta_conf,
		    uint16_t reta_size)
{
	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint16_t i, lut_size = hw->func_caps.common_cap.rss_table_size;
	uint16_t idx, shift;
	uint8_t *lut;
	int ret;

	if (reta_size != lut_size ||
	    reta_size > ETH_RSS_RETA_SIZE_512) {
		PMD_DRV_LOG(ERR,
			    "The size of hash lookup table configured (%d) "
			    "doesn't match the number hardware can "
			    "support (%d)",
			    reta_size, lut_size);
		return -EINVAL;
	}

	lut = rte_zmalloc(NULL, reta_size, 0);
	if (!lut) {
		PMD_DRV_LOG(ERR, "No memory can be allocated");
		return -ENOMEM;
	}
	ret = ice_get_rss_lut(pf->main_vsi, lut, reta_size);
	if (ret)
		goto out;

	for (i = 0; i < reta_size; i++) {
		idx = i / RTE_RETA_GROUP_SIZE;
		shift = i % RTE_RETA_GROUP_SIZE;
		if (reta_conf[idx].mask & (1ULL << shift))
			lut[i] = reta_conf[idx].reta[shift];
	}
	ret = ice_set_rss_lut(pf->main_vsi, lut, reta_size);

out:
	rte_free(lut);

	return ret;
}

static int
ice_rss_reta_query(struct rte_eth_dev *dev,
		   struct rte_eth_rss_reta_entry64 *reta_conf,
		   uint16_t reta_size)
{
	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint16_t i, lut_size = hw->func_caps.common_cap.rss_table_size;
	uint16_t idx, shift;
	uint8_t *lut;
	int ret;

	if (reta_size != lut_size ||
	    reta_size > ETH_RSS_RETA_SIZE_512) {
		PMD_DRV_LOG(ERR,
			    "The size of hash lookup table configured (%d) "
			    "doesn't match the number hardware can "
			    "support (%d)",
			    reta_size, lut_size);
		return -EINVAL;
	}

	lut = rte_zmalloc(NULL, reta_size, 0);
	if (!lut) {
		PMD_DRV_LOG(ERR, "No memory can be allocated");
		return -ENOMEM;
	}

	ret = ice_get_rss_lut(pf->main_vsi, lut, reta_size);
	if (ret)
		goto out;

	for (i = 0; i < reta_size; i++) {
		idx = i / RTE_RETA_GROUP_SIZE;
		shift = i % RTE_RETA_GROUP_SIZE;
		if (reta_conf[idx].mask & (1ULL << shift))
			reta_conf[idx].reta[shift] = lut[i];
	}

out:
	rte_free(lut);

	return ret;
}
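
/*
 * Configure the RSS hash key through the admin queue. A NULL or zero-length
 * key is silently ignored; any other length must match the hardware key size
 * of (VSIQF_HKEY_MAX_INDEX + 1) 32-bit words.
 */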
static int
ice_set_rss_key(struct ice_vsi *vsi, uint8_t *key, uint8_t key_len)
{
	struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
	int ret = 0;

	if (!key || key_len == 0) {
		PMD_DRV_LOG(DEBUG, "No key to be configured");
		return 0;
	} else if (key_len != (VSIQF_HKEY_MAX_INDEX + 1) *
		   sizeof(uint32_t)) {
		PMD_DRV_LOG(ERR, "Invalid key length %u", key_len);
		return -EINVAL;
	}

	struct ice_aqc_get_set_rss_keys *key_dw =
		(struct ice_aqc_get_set_rss_keys *)key;

	ret = ice_aq_set_rss_key(hw, vsi->idx, key_dw);
	if (ret) {
		PMD_DRV_LOG(ERR, "Failed to configure RSS key via AQ");
		ret = -EINVAL;
	}

	return ret;
}

static int
ice_get_rss_key(struct ice_vsi *vsi, uint8_t *key, uint8_t *key_len)
{
	struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
	int ret;

	if (!key || !key_len)
		return -EINVAL;

	ret = ice_aq_get_rss_key
		(hw, vsi->idx,
		 (struct ice_aqc_get_set_rss_keys *)key);
	if (ret) {
		PMD_DRV_LOG(ERR, "Failed to get RSS key via AQ");
		return -EINVAL;
	}
	*key_len = (VSIQF_HKEY_MAX_INDEX + 1) * sizeof(uint32_t);

	return 0;
}

static int
ice_rss_hash_update(struct rte_eth_dev *dev,
		    struct rte_eth_rss_conf *rss_conf)
{
	enum ice_status status = ICE_SUCCESS;
	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct ice_vsi *vsi = pf->main_vsi;

	/* set hash key */
	status = ice_set_rss_key(vsi, rss_conf->rss_key, rss_conf->rss_key_len);
	if (status)
		return status;

	/* TODO: hash enable config, ice_add_rss_cfg */
	return 0;
}

static int
ice_rss_hash_conf_get(struct rte_eth_dev *dev,
		      struct rte_eth_rss_conf *rss_conf)
{
	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct ice_vsi *vsi = pf->main_vsi;

	ice_get_rss_key(vsi, rss_conf->rss_key,
			&rss_conf->rss_key_len);

	/* TODO: default set to 0 as hf config is not supported now */
	rss_conf->rss_hf = 0;

	return 0;
}
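
/*
 * Promiscuous and allmulticast mode handlers: they toggle the unicast and/or
 * multicast promiscuous bits on the main VSI through ice_set_vsi_promisc()
 * and ice_clear_vsi_promisc().
 */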
static void
ice_promisc_enable(struct rte_eth_dev *dev)
{
	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct ice_vsi *vsi = pf->main_vsi;
	enum ice_status status;
	uint8_t pmask;

	pmask = ICE_PROMISC_UCAST_RX | ICE_PROMISC_UCAST_TX |
		ICE_PROMISC_MCAST_RX | ICE_PROMISC_MCAST_TX;

	status = ice_set_vsi_promisc(hw, vsi->idx, pmask, 0);
	if (status == ICE_ERR_ALREADY_EXISTS)
		PMD_DRV_LOG(DEBUG, "Promisc mode has already been enabled");
	else if (status != ICE_SUCCESS)
		PMD_DRV_LOG(ERR, "Failed to enable promisc, err=%d", status);
}

static void
ice_promisc_disable(struct rte_eth_dev *dev)
{
	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct ice_vsi *vsi = pf->main_vsi;
	enum ice_status status;
	uint8_t pmask;

	pmask = ICE_PROMISC_UCAST_RX | ICE_PROMISC_UCAST_TX |
		ICE_PROMISC_MCAST_RX | ICE_PROMISC_MCAST_TX;

	status = ice_clear_vsi_promisc(hw, vsi->idx, pmask, 0);
	if (status != ICE_SUCCESS)
		PMD_DRV_LOG(ERR, "Failed to clear promisc, err=%d", status);
}

static void
ice_allmulti_enable(struct rte_eth_dev *dev)
{
	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct ice_vsi *vsi = pf->main_vsi;
	enum ice_status status;
	uint8_t pmask;

	pmask = ICE_PROMISC_MCAST_RX | ICE_PROMISC_MCAST_TX;

	status = ice_set_vsi_promisc(hw, vsi->idx, pmask, 0);
	if (status != ICE_SUCCESS)
		PMD_DRV_LOG(ERR, "Failed to enable allmulti, err=%d", status);
}

static void
ice_allmulti_disable(struct rte_eth_dev *dev)
{
	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct ice_vsi *vsi = pf->main_vsi;
	enum ice_status status;
	uint8_t pmask;

	if (dev->data->promiscuous == 1)
		return; /* must remain in all_multicast mode */

	pmask = ICE_PROMISC_MCAST_RX | ICE_PROMISC_MCAST_TX;

	status = ice_clear_vsi_promisc(hw, vsi->idx, pmask, 0);
	if (status != ICE_SUCCESS)
		PMD_DRV_LOG(ERR, "Failed to clear allmulti, err=%d", status);
}
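
/*
 * Re-arm the queue vector in GLINT_DYN_CTL so the next Rx descriptor
 * write-back raises an interrupt; the disable path switches the vector to
 * write-back-on-ITR, which effectively masks the interrupt for that queue.
 */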
static int ice_rx_queue_intr_enable(struct rte_eth_dev *dev,
				    uint16_t queue_id)
{
	struct rte_pci_device *pci_dev = ICE_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint32_t val;
	uint16_t msix_intr;

	msix_intr = intr_handle->intr_vec[queue_id];

	val = GLINT_DYN_CTL_INTENA_M | GLINT_DYN_CTL_CLEARPBA_M |
	      GLINT_DYN_CTL_ITR_INDX_M;
	val &= ~GLINT_DYN_CTL_WB_ON_ITR_M;

	ICE_WRITE_REG(hw, GLINT_DYN_CTL(msix_intr), val);
	rte_intr_enable(&pci_dev->intr_handle);

	return 0;
}

static int ice_rx_queue_intr_disable(struct rte_eth_dev *dev,
				     uint16_t queue_id)
{
	struct rte_pci_device *pci_dev = ICE_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint16_t msix_intr;

	msix_intr = intr_handle->intr_vec[queue_id];

	ICE_WRITE_REG(hw, GLINT_DYN_CTL(msix_intr), GLINT_DYN_CTL_WB_ON_ITR_M);

	return 0;
}
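
/*
 * Report the firmware/NVM version string, combining the NVM map version,
 * the EETRACK id and the OEM version word decoded from hw->nvm.
 */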
static int
ice_fw_version_get(struct rte_eth_dev *dev, char *fw_version, size_t fw_size)
{
	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	u32 full_ver;
	u8 ver, patch;
	u16 build;
	int ret;

	full_ver = hw->nvm.oem_ver;
	ver = (u8)(full_ver >> 24);
	build = (u16)((full_ver >> 8) & 0xffff);
	patch = (u8)(full_ver & 0xff);

	ret = snprintf(fw_version, fw_size,
		       "%d.%d%d 0x%08x %d.%d.%d",
		       ((hw->nvm.ver >> 12) & 0xf),
		       ((hw->nvm.ver >> 4) & 0xff),
		       (hw->nvm.ver & 0xf), hw->nvm.eetrack,
		       ver, build, patch);

	/* add the size of '\0' */
	ret += 1;
	if (fw_size < (u32)ret)
		return ret;
	else
		return 0;
}

static int
ice_vsi_vlan_pvid_set(struct ice_vsi *vsi, struct ice_vsi_vlan_pvid_info *info)
{
	struct ice_hw *hw;
	struct ice_vsi_ctx ctxt;
	uint8_t vlan_flags = 0;
	int ret;

	if (!vsi || !info) {
		PMD_DRV_LOG(ERR, "invalid parameters");
		return -EINVAL;
	}

	if (info->on) {
		vsi->info.pvid = info->config.pvid;
		/**
		 * If insert pvid is enabled, only tagged pkts are
		 * allowed to be sent out.
		 */
		vlan_flags = ICE_AQ_VSI_PVLAN_INSERT_PVID |
			     ICE_AQ_VSI_VLAN_MODE_UNTAGGED;
	} else {
		vsi->info.pvid = 0;
		if (info->config.reject.tagged == 0)
			vlan_flags |= ICE_AQ_VSI_VLAN_MODE_TAGGED;

		if (info->config.reject.untagged == 0)
			vlan_flags |= ICE_AQ_VSI_VLAN_MODE_UNTAGGED;
	}
	vsi->info.vlan_flags &= ~(ICE_AQ_VSI_PVLAN_INSERT_PVID |
				  ICE_AQ_VSI_VLAN_MODE_M);
	vsi->info.vlan_flags |= vlan_flags;
	memset(&ctxt, 0, sizeof(ctxt));
	rte_memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
	ctxt.info.valid_sections =
		rte_cpu_to_le_16(ICE_AQ_VSI_PROP_VLAN_VALID);
	ctxt.vsi_num = vsi->vsi_id;

	hw = ICE_VSI_TO_HW(vsi);
	ret = ice_update_vsi(hw, vsi->idx, &ctxt, NULL);
	if (ret != ICE_SUCCESS) {
		PMD_DRV_LOG(ERR,
			    "update VSI for VLAN insert failed, err %d",
			    ret);
		return -EINVAL;
	}

	vsi->info.valid_sections |=
		rte_cpu_to_le_16(ICE_AQ_VSI_PROP_VLAN_VALID);

	return ret;
}

static int
ice_vlan_pvid_set(struct rte_eth_dev *dev, uint16_t pvid, int on)
{
	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct ice_vsi *vsi = pf->main_vsi;
	struct rte_eth_dev_data *data = pf->dev_data;
	struct ice_vsi_vlan_pvid_info info;
	int ret;

	memset(&info, 0, sizeof(info));
	info.on = on;
	if (info.on) {
		info.config.pvid = pvid;
	} else {
		info.config.reject.tagged =
			data->dev_conf.txmode.hw_vlan_reject_tagged;
		info.config.reject.untagged =
			data->dev_conf.txmode.hw_vlan_reject_untagged;
	}

	ret = ice_vsi_vlan_pvid_set(vsi, &info);
	if (ret < 0) {
		PMD_DRV_LOG(ERR, "Failed to set pvid.");
		return -EINVAL;
	}

	return 0;
}

static int
ice_get_eeprom_length(struct rte_eth_dev *dev)
{
	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	/* Convert word count to byte count */
	return hw->nvm.sr_words << 1;
}

static int
ice_get_eeprom(struct rte_eth_dev *dev,
	       struct rte_dev_eeprom_info *eeprom)
{
	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint16_t *data = eeprom->data;
	uint16_t first_word, last_word, nwords;
	enum ice_status status = ICE_SUCCESS;

	first_word = eeprom->offset >> 1;
	last_word = (eeprom->offset + eeprom->length - 1) >> 1;
	nwords = last_word - first_word + 1;

	if (first_word >= hw->nvm.sr_words ||
	    last_word >= hw->nvm.sr_words) {
		PMD_DRV_LOG(ERR, "Requested EEPROM bytes out of range.");
		return -EINVAL;
	}

	eeprom->magic = hw->vendor_id | (hw->device_id << 16);

	status = ice_read_sr_buf(hw, first_word, &nwords, data);
	if (status) {
		PMD_DRV_LOG(ERR, "EEPROM read failed.");
		eeprom->length = sizeof(uint16_t) * nwords;
		return -EIO;
	}

	return 0;
}
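
/*
 * Helpers for reading the 32-bit and 40-bit hardware counters. The counters
 * are never cleared by the driver; each read is reported relative to a saved
 * offset, with wrap-around handled by adding 2^32 or 2^40 when the new value
 * is smaller than the offset.
 */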
static void
ice_stat_update_32(struct ice_hw *hw,
		   uint32_t reg,
		   bool offset_loaded,
		   uint64_t *offset,
		   uint64_t *stat)
{
	uint64_t new_data;

	new_data = (uint64_t)ICE_READ_REG(hw, reg);
	if (!offset_loaded)
		*offset = new_data;

	if (new_data >= *offset)
		*stat = (uint64_t)(new_data - *offset);
	else
		*stat = (uint64_t)((new_data +
				    ((uint64_t)1 << ICE_32_BIT_WIDTH))
				   - *offset);
}

static void
ice_stat_update_40(struct ice_hw *hw,
		   uint32_t hireg,
		   uint32_t loreg,
		   bool offset_loaded,
		   uint64_t *offset,
		   uint64_t *stat)
{
	uint64_t new_data;

	new_data = (uint64_t)ICE_READ_REG(hw, loreg);
	new_data |= (uint64_t)(ICE_READ_REG(hw, hireg) & ICE_8_BIT_MASK) <<
		    ICE_32_BIT_WIDTH;

	if (!offset_loaded)
		*offset = new_data;

	if (new_data >= *offset)
		*stat = new_data - *offset;
	else
		*stat = (uint64_t)((new_data +
				    ((uint64_t)1 << ICE_40_BIT_WIDTH)) -
				   *offset);

	*stat &= ICE_40_BIT_MASK;
}

/* Get all the statistics of a VSI */
static void
ice_update_vsi_stats(struct ice_vsi *vsi)
{
	struct ice_eth_stats *oes = &vsi->eth_stats_offset;
	struct ice_eth_stats *nes = &vsi->eth_stats;
	struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
	int idx = rte_le_to_cpu_16(vsi->vsi_id);

	ice_stat_update_40(hw, GLV_GORCH(idx), GLV_GORCL(idx),
			   vsi->offset_loaded, &oes->rx_bytes,
			   &nes->rx_bytes);
	ice_stat_update_40(hw, GLV_UPRCH(idx), GLV_UPRCL(idx),
			   vsi->offset_loaded, &oes->rx_unicast,
			   &nes->rx_unicast);
	ice_stat_update_40(hw, GLV_MPRCH(idx), GLV_MPRCL(idx),
			   vsi->offset_loaded, &oes->rx_multicast,
			   &nes->rx_multicast);
	ice_stat_update_40(hw, GLV_BPRCH(idx), GLV_BPRCL(idx),
			   vsi->offset_loaded, &oes->rx_broadcast,
			   &nes->rx_broadcast);
	/* exclude CRC bytes */
	nes->rx_bytes -= (nes->rx_unicast + nes->rx_multicast +
			  nes->rx_broadcast) * ETHER_CRC_LEN;

	ice_stat_update_32(hw, GLV_RDPC(idx), vsi->offset_loaded,
			   &oes->rx_discards, &nes->rx_discards);
	/* GLV_REPC not supported */
	/* GLV_RMPC not supported */
	ice_stat_update_32(hw, GLSWID_RUPP(idx), vsi->offset_loaded,
			   &oes->rx_unknown_protocol,
			   &nes->rx_unknown_protocol);
	ice_stat_update_40(hw, GLV_GOTCH(idx), GLV_GOTCL(idx),
			   vsi->offset_loaded, &oes->tx_bytes,
			   &nes->tx_bytes);
	ice_stat_update_40(hw, GLV_UPTCH(idx), GLV_UPTCL(idx),
			   vsi->offset_loaded, &oes->tx_unicast,
			   &nes->tx_unicast);
	ice_stat_update_40(hw, GLV_MPTCH(idx), GLV_MPTCL(idx),
			   vsi->offset_loaded, &oes->tx_multicast,
			   &nes->tx_multicast);
	ice_stat_update_40(hw, GLV_BPTCH(idx), GLV_BPTCL(idx),
			   vsi->offset_loaded, &oes->tx_broadcast,
			   &nes->tx_broadcast);
	/* GLV_TDPC not supported */
	ice_stat_update_32(hw, GLV_TEPC(idx), vsi->offset_loaded,
			   &oes->tx_errors, &nes->tx_errors);
	vsi->offset_loaded = true;

	PMD_DRV_LOG(DEBUG, "************** VSI[%u] stats start **************",
		    vsi->vsi_id);
	PMD_DRV_LOG(DEBUG, "rx_bytes: %"PRIu64"", nes->rx_bytes);
	PMD_DRV_LOG(DEBUG, "rx_unicast: %"PRIu64"", nes->rx_unicast);
	PMD_DRV_LOG(DEBUG, "rx_multicast: %"PRIu64"", nes->rx_multicast);
	PMD_DRV_LOG(DEBUG, "rx_broadcast: %"PRIu64"", nes->rx_broadcast);
	PMD_DRV_LOG(DEBUG, "rx_discards: %"PRIu64"", nes->rx_discards);
	PMD_DRV_LOG(DEBUG, "rx_unknown_protocol: %"PRIu64"",
		    nes->rx_unknown_protocol);
	PMD_DRV_LOG(DEBUG, "tx_bytes: %"PRIu64"", nes->tx_bytes);
	PMD_DRV_LOG(DEBUG, "tx_unicast: %"PRIu64"", nes->tx_unicast);
	PMD_DRV_LOG(DEBUG, "tx_multicast: %"PRIu64"", nes->tx_multicast);
	PMD_DRV_LOG(DEBUG, "tx_broadcast: %"PRIu64"", nes->tx_broadcast);
	PMD_DRV_LOG(DEBUG, "tx_discards: %"PRIu64"", nes->tx_discards);
	PMD_DRV_LOG(DEBUG, "tx_errors: %"PRIu64"", nes->tx_errors);
	PMD_DRV_LOG(DEBUG, "************** VSI[%u] stats end ****************",
		    vsi->vsi_id);
}

static void
ice_read_stats_registers(struct ice_pf *pf, struct ice_hw *hw)
{
	struct ice_hw_port_stats *ns = &pf->stats; /* new stats */
	struct ice_hw_port_stats *os = &pf->stats_offset; /* old stats */

	/* Get statistics of struct ice_eth_stats */
	ice_stat_update_40(hw, GLPRT_GORCH(hw->port_info->lport),
			   GLPRT_GORCL(hw->port_info->lport),
			   pf->offset_loaded, &os->eth.rx_bytes,
			   &ns->eth.rx_bytes);
	ice_stat_update_40(hw, GLPRT_UPRCH(hw->port_info->lport),
			   GLPRT_UPRCL(hw->port_info->lport),
			   pf->offset_loaded, &os->eth.rx_unicast,
			   &ns->eth.rx_unicast);
	ice_stat_update_40(hw, GLPRT_MPRCH(hw->port_info->lport),
			   GLPRT_MPRCL(hw->port_info->lport),
			   pf->offset_loaded, &os->eth.rx_multicast,
			   &ns->eth.rx_multicast);
	ice_stat_update_40(hw, GLPRT_BPRCH(hw->port_info->lport),
			   GLPRT_BPRCL(hw->port_info->lport),
			   pf->offset_loaded, &os->eth.rx_broadcast,
			   &ns->eth.rx_broadcast);
	ice_stat_update_32(hw, PRTRPB_RDPC,
			   pf->offset_loaded, &os->eth.rx_discards,
			   &ns->eth.rx_discards);

	/* Workaround: CRC size should not be included in byte statistics,
	 * so subtract ETHER_CRC_LEN from the byte counter for each rx packet.
	 */
	ns->eth.rx_bytes -= (ns->eth.rx_unicast + ns->eth.rx_multicast +
			     ns->eth.rx_broadcast) * ETHER_CRC_LEN;

	/* GLPRT_REPC not supported */
	/* GLPRT_RMPC not supported */
	ice_stat_update_32(hw, GLSWID_RUPP(hw->port_info->lport),
			   pf->offset_loaded,
			   &os->eth.rx_unknown_protocol,
			   &ns->eth.rx_unknown_protocol);
	ice_stat_update_40(hw, GLPRT_GOTCH(hw->port_info->lport),
			   GLPRT_GOTCL(hw->port_info->lport),
			   pf->offset_loaded, &os->eth.tx_bytes,
			   &ns->eth.tx_bytes);
	ice_stat_update_40(hw, GLPRT_UPTCH(hw->port_info->lport),
			   GLPRT_UPTCL(hw->port_info->lport),
			   pf->offset_loaded, &os->eth.tx_unicast,
			   &ns->eth.tx_unicast);
	ice_stat_update_40(hw, GLPRT_MPTCH(hw->port_info->lport),
			   GLPRT_MPTCL(hw->port_info->lport),
			   pf->offset_loaded, &os->eth.tx_multicast,
			   &ns->eth.tx_multicast);
	ice_stat_update_40(hw, GLPRT_BPTCH(hw->port_info->lport),
			   GLPRT_BPTCL(hw->port_info->lport),
			   pf->offset_loaded, &os->eth.tx_broadcast,
			   &ns->eth.tx_broadcast);
	ns->eth.tx_bytes -= (ns->eth.tx_unicast + ns->eth.tx_multicast +
			     ns->eth.tx_broadcast) * ETHER_CRC_LEN;

	/* GLPRT_TEPC not supported */

	/* additional port specific stats */
	ice_stat_update_32(hw, GLPRT_TDOLD(hw->port_info->lport),
			   pf->offset_loaded, &os->tx_dropped_link_down,
			   &ns->tx_dropped_link_down);
	ice_stat_update_32(hw, GLPRT_CRCERRS(hw->port_info->lport),
			   pf->offset_loaded, &os->crc_errors,
			   &ns->crc_errors);
	ice_stat_update_32(hw, GLPRT_ILLERRC(hw->port_info->lport),
			   pf->offset_loaded, &os->illegal_bytes,
			   &ns->illegal_bytes);
	/* GLPRT_ERRBC not supported */
	ice_stat_update_32(hw, GLPRT_MLFC(hw->port_info->lport),
			   pf->offset_loaded, &os->mac_local_faults,
			   &ns->mac_local_faults);
	ice_stat_update_32(hw, GLPRT_MRFC(hw->port_info->lport),
			   pf->offset_loaded, &os->mac_remote_faults,
			   &ns->mac_remote_faults);

	ice_stat_update_32(hw, GLPRT_RLEC(hw->port_info->lport),
			   pf->offset_loaded, &os->rx_len_errors,
			   &ns->rx_len_errors);

	ice_stat_update_32(hw, GLPRT_LXONRXC(hw->port_info->lport),
			   pf->offset_loaded, &os->link_xon_rx,
			   &ns->link_xon_rx);
	ice_stat_update_32(hw, GLPRT_LXOFFRXC(hw->port_info->lport),
			   pf->offset_loaded, &os->link_xoff_rx,
			   &ns->link_xoff_rx);
	ice_stat_update_32(hw, GLPRT_LXONTXC(hw->port_info->lport),
			   pf->offset_loaded, &os->link_xon_tx,
			   &ns->link_xon_tx);
	ice_stat_update_32(hw, GLPRT_LXOFFTXC(hw->port_info->lport),
			   pf->offset_loaded, &os->link_xoff_tx,
			   &ns->link_xoff_tx);
	ice_stat_update_40(hw, GLPRT_PRC64H(hw->port_info->lport),
			   GLPRT_PRC64L(hw->port_info->lport),
			   pf->offset_loaded, &os->rx_size_64,
			   &ns->rx_size_64);
	ice_stat_update_40(hw, GLPRT_PRC127H(hw->port_info->lport),
			   GLPRT_PRC127L(hw->port_info->lport),
			   pf->offset_loaded, &os->rx_size_127,
			   &ns->rx_size_127);
	ice_stat_update_40(hw, GLPRT_PRC255H(hw->port_info->lport),
			   GLPRT_PRC255L(hw->port_info->lport),
			   pf->offset_loaded, &os->rx_size_255,
			   &ns->rx_size_255);
	ice_stat_update_40(hw, GLPRT_PRC511H(hw->port_info->lport),
			   GLPRT_PRC511L(hw->port_info->lport),
			   pf->offset_loaded, &os->rx_size_511,
			   &ns->rx_size_511);
	ice_stat_update_40(hw, GLPRT_PRC1023H(hw->port_info->lport),
			   GLPRT_PRC1023L(hw->port_info->lport),
			   pf->offset_loaded, &os->rx_size_1023,
			   &ns->rx_size_1023);
	ice_stat_update_40(hw, GLPRT_PRC1522H(hw->port_info->lport),
			   GLPRT_PRC1522L(hw->port_info->lport),
			   pf->offset_loaded, &os->rx_size_1522,
			   &ns->rx_size_1522);
	ice_stat_update_40(hw, GLPRT_PRC9522H(hw->port_info->lport),
			   GLPRT_PRC9522L(hw->port_info->lport),
			   pf->offset_loaded, &os->rx_size_big,
			   &ns->rx_size_big);
	ice_stat_update_32(hw, GLPRT_RUC(hw->port_info->lport),
			   pf->offset_loaded, &os->rx_undersize,
			   &ns->rx_undersize);
	ice_stat_update_32(hw, GLPRT_RFC(hw->port_info->lport),
			   pf->offset_loaded, &os->rx_fragments,
			   &ns->rx_fragments);
	ice_stat_update_32(hw, GLPRT_ROC(hw->port_info->lport),
			   pf->offset_loaded, &os->rx_oversize,
			   &ns->rx_oversize);
	ice_stat_update_32(hw, GLPRT_RJC(hw->port_info->lport),
			   pf->offset_loaded, &os->rx_jabber,
			   &ns->rx_jabber);
	ice_stat_update_40(hw, GLPRT_PTC64H(hw->port_info->lport),
			   GLPRT_PTC64L(hw->port_info->lport),
			   pf->offset_loaded, &os->tx_size_64,
			   &ns->tx_size_64);
	ice_stat_update_40(hw, GLPRT_PTC127H(hw->port_info->lport),
			   GLPRT_PTC127L(hw->port_info->lport),
			   pf->offset_loaded, &os->tx_size_127,
			   &ns->tx_size_127);
	ice_stat_update_40(hw, GLPRT_PTC255H(hw->port_info->lport),
			   GLPRT_PTC255L(hw->port_info->lport),
			   pf->offset_loaded, &os->tx_size_255,
			   &ns->tx_size_255);
	ice_stat_update_40(hw, GLPRT_PTC511H(hw->port_info->lport),
			   GLPRT_PTC511L(hw->port_info->lport),
			   pf->offset_loaded, &os->tx_size_511,
			   &ns->tx_size_511);
	ice_stat_update_40(hw, GLPRT_PTC1023H(hw->port_info->lport),
			   GLPRT_PTC1023L(hw->port_info->lport),
			   pf->offset_loaded, &os->tx_size_1023,
			   &ns->tx_size_1023);
	ice_stat_update_40(hw, GLPRT_PTC1522H(hw->port_info->lport),
			   GLPRT_PTC1522L(hw->port_info->lport),
			   pf->offset_loaded, &os->tx_size_1522,
			   &ns->tx_size_1522);
	ice_stat_update_40(hw, GLPRT_PTC9522H(hw->port_info->lport),
			   GLPRT_PTC9522L(hw->port_info->lport),
			   pf->offset_loaded, &os->tx_size_big,
			   &ns->tx_size_big);

	/* GLPRT_MSPDC not supported */
	/* GLPRT_XEC not supported */

	pf->offset_loaded = true;

	if (pf->main_vsi)
		ice_update_vsi_stats(pf->main_vsi);
}

/* Get all statistics of a port */
static int
ice_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
{
	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct ice_hw_port_stats *ns = &pf->stats; /* new stats */

	/* call read registers - updates values, now write them to struct */
	ice_read_stats_registers(pf, hw);

	stats->ipackets = pf->main_vsi->eth_stats.rx_unicast +
			  pf->main_vsi->eth_stats.rx_multicast +
			  pf->main_vsi->eth_stats.rx_broadcast -
			  pf->main_vsi->eth_stats.rx_discards;
	stats->opackets = ns->eth.tx_unicast +
			  ns->eth.tx_multicast +
			  ns->eth.tx_broadcast;
	stats->ibytes = pf->main_vsi->eth_stats.rx_bytes;
	stats->obytes = ns->eth.tx_bytes;
	stats->oerrors = ns->eth.tx_errors +
			 pf->main_vsi->eth_stats.tx_errors;

	stats->imissed = ns->eth.rx_discards +
			 pf->main_vsi->eth_stats.rx_discards;
	stats->ierrors = ns->crc_errors +
			 ns->rx_undersize +
			 ns->rx_oversize + ns->rx_fragments + ns->rx_jabber;

	PMD_DRV_LOG(DEBUG, "*************** PF stats start *****************");
	PMD_DRV_LOG(DEBUG, "rx_bytes: %"PRIu64"", ns->eth.rx_bytes);
	PMD_DRV_LOG(DEBUG, "rx_unicast: %"PRIu64"", ns->eth.rx_unicast);
	PMD_DRV_LOG(DEBUG, "rx_multicast:%"PRIu64"", ns->eth.rx_multicast);
	PMD_DRV_LOG(DEBUG, "rx_broadcast:%"PRIu64"", ns->eth.rx_broadcast);
	PMD_DRV_LOG(DEBUG, "rx_discards:%"PRIu64"", ns->eth.rx_discards);
	PMD_DRV_LOG(DEBUG, "vsi rx_discards:%"PRIu64"",
		    pf->main_vsi->eth_stats.rx_discards);
	PMD_DRV_LOG(DEBUG, "rx_unknown_protocol: %"PRIu64"",
		    ns->eth.rx_unknown_protocol);
	PMD_DRV_LOG(DEBUG, "tx_bytes: %"PRIu64"", ns->eth.tx_bytes);
	PMD_DRV_LOG(DEBUG, "tx_unicast: %"PRIu64"", ns->eth.tx_unicast);
	PMD_DRV_LOG(DEBUG, "tx_multicast:%"PRIu64"", ns->eth.tx_multicast);
	PMD_DRV_LOG(DEBUG, "tx_broadcast:%"PRIu64"", ns->eth.tx_broadcast);
	PMD_DRV_LOG(DEBUG, "tx_discards:%"PRIu64"", ns->eth.tx_discards);
	PMD_DRV_LOG(DEBUG, "vsi tx_discards:%"PRIu64"",
		    pf->main_vsi->eth_stats.tx_discards);
	PMD_DRV_LOG(DEBUG, "tx_errors: %"PRIu64"", ns->eth.tx_errors);

	PMD_DRV_LOG(DEBUG, "tx_dropped_link_down: %"PRIu64"",
		    ns->tx_dropped_link_down);
	PMD_DRV_LOG(DEBUG, "crc_errors: %"PRIu64"", ns->crc_errors);
	PMD_DRV_LOG(DEBUG, "illegal_bytes: %"PRIu64"",
		    ns->illegal_bytes);
	PMD_DRV_LOG(DEBUG, "error_bytes: %"PRIu64"", ns->error_bytes);
	PMD_DRV_LOG(DEBUG, "mac_local_faults: %"PRIu64"",
		    ns->mac_local_faults);
	PMD_DRV_LOG(DEBUG, "mac_remote_faults: %"PRIu64"",
		    ns->mac_remote_faults);
	PMD_DRV_LOG(DEBUG, "link_xon_rx: %"PRIu64"", ns->link_xon_rx);
	PMD_DRV_LOG(DEBUG, "link_xoff_rx: %"PRIu64"", ns->link_xoff_rx);
	PMD_DRV_LOG(DEBUG, "link_xon_tx: %"PRIu64"", ns->link_xon_tx);
	PMD_DRV_LOG(DEBUG, "link_xoff_tx: %"PRIu64"", ns->link_xoff_tx);
	PMD_DRV_LOG(DEBUG, "rx_size_64: %"PRIu64"", ns->rx_size_64);
	PMD_DRV_LOG(DEBUG, "rx_size_127: %"PRIu64"", ns->rx_size_127);
	PMD_DRV_LOG(DEBUG, "rx_size_255: %"PRIu64"", ns->rx_size_255);
	PMD_DRV_LOG(DEBUG, "rx_size_511: %"PRIu64"", ns->rx_size_511);
	PMD_DRV_LOG(DEBUG, "rx_size_1023: %"PRIu64"", ns->rx_size_1023);
	PMD_DRV_LOG(DEBUG, "rx_size_1522: %"PRIu64"", ns->rx_size_1522);
	PMD_DRV_LOG(DEBUG, "rx_size_big: %"PRIu64"", ns->rx_size_big);
	PMD_DRV_LOG(DEBUG, "rx_undersize: %"PRIu64"", ns->rx_undersize);
	PMD_DRV_LOG(DEBUG, "rx_fragments: %"PRIu64"", ns->rx_fragments);
	PMD_DRV_LOG(DEBUG, "rx_oversize: %"PRIu64"", ns->rx_oversize);
	PMD_DRV_LOG(DEBUG, "rx_jabber: %"PRIu64"", ns->rx_jabber);
	PMD_DRV_LOG(DEBUG, "tx_size_64: %"PRIu64"", ns->tx_size_64);
	PMD_DRV_LOG(DEBUG, "tx_size_127: %"PRIu64"", ns->tx_size_127);
	PMD_DRV_LOG(DEBUG, "tx_size_255: %"PRIu64"", ns->tx_size_255);
	PMD_DRV_LOG(DEBUG, "tx_size_511: %"PRIu64"", ns->tx_size_511);
	PMD_DRV_LOG(DEBUG, "tx_size_1023: %"PRIu64"", ns->tx_size_1023);
	PMD_DRV_LOG(DEBUG, "tx_size_1522: %"PRIu64"", ns->tx_size_1522);
	PMD_DRV_LOG(DEBUG, "tx_size_big: %"PRIu64"", ns->tx_size_big);
	PMD_DRV_LOG(DEBUG, "rx_len_errors: %"PRIu64"", ns->rx_len_errors);
	PMD_DRV_LOG(DEBUG, "************* PF stats end ****************");

	return 0;
}

/* Reset the statistics */
static void
ice_stats_reset(struct rte_eth_dev *dev)
{
	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	/* Mark PF and VSI stats to update the offset, aka "reset" */
	pf->offset_loaded = false;
	if (pf->main_vsi)
		pf->main_vsi->offset_loaded = false;

	/* read the stats, reading current register values into offset */
	ice_read_stats_registers(pf, hw);
}

static unsigned int
ice_xstats_calc_num(void)
{
	unsigned int num;

	num = ICE_NB_ETH_XSTATS + ICE_NB_HW_PORT_XSTATS;

	return num;
}
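
/*
 * Fill the extended statistics array: the ice_eth_stats fields come first,
 * followed by the per-port counters from ice_hw_port_stats. If the supplied
 * array is too small, the required count is returned instead.
 *
 * For reference (hypothetical application-side sketch, not part of this
 * driver): names and values are typically fetched with
 *	n = rte_eth_xstats_get_names(port_id, NULL, 0);
 *	rte_eth_xstats_get_names(port_id, names, n);
 *	rte_eth_xstats_get(port_id, xstats, n);
 */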
static int
ice_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
	       unsigned int n)
{
	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	unsigned int i;
	unsigned int count;
	struct ice_hw_port_stats *hw_stats = &pf->stats;

	count = ice_xstats_calc_num();
	if (n < count)
		return count;

	ice_read_stats_registers(pf, hw);

	if (!xstats)
		return 0;

	count = 0;

	/* Get stats from ice_eth_stats struct */
	for (i = 0; i < ICE_NB_ETH_XSTATS; i++) {
		xstats[count].value =
			*(uint64_t *)((char *)&hw_stats->eth +
				      ice_stats_strings[i].offset);
		xstats[count].id = count;
		count++;
	}

	/* Get individual stats from ice_hw_port struct */
	for (i = 0; i < ICE_NB_HW_PORT_XSTATS; i++) {
		xstats[count].value =
			*(uint64_t *)((char *)hw_stats +
				      ice_hw_port_strings[i].offset);
		xstats[count].id = count;
		count++;
	}

	return count;
}

static int ice_xstats_get_names(__rte_unused struct rte_eth_dev *dev,
				struct rte_eth_xstat_name *xstats_names,
				__rte_unused unsigned int limit)
{
	unsigned int count = 0;
	unsigned int i;

	if (!xstats_names)
		return ice_xstats_calc_num();

	/* Note: limit checked in rte_eth_xstats_names() */

	/* Get stats from ice_eth_stats struct */
	for (i = 0; i < ICE_NB_ETH_XSTATS; i++) {
		strlcpy(xstats_names[count].name, ice_stats_strings[i].name,
			sizeof(xstats_names[count].name));
		count++;
	}

	/* Get individual stats from ice_hw_port struct */
	for (i = 0; i < ICE_NB_HW_PORT_XSTATS; i++) {
		strlcpy(xstats_names[count].name, ice_hw_port_strings[i].name,
			sizeof(xstats_names[count].name));
		count++;
	}

	return count;
}

static int
ice_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
	      struct rte_pci_device *pci_dev)
{
	return rte_eth_dev_pci_generic_probe(pci_dev,
					     sizeof(struct ice_adapter),
					     ice_dev_init);
}

static int
ice_pci_remove(struct rte_pci_device *pci_dev)
{
	return rte_eth_dev_pci_generic_remove(pci_dev, ice_dev_uninit);
}

static struct rte_pci_driver rte_ice_pmd = {
	.id_table = pci_id_ice_map,
	.drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC |
		     RTE_PCI_DRV_IOVA_AS_VA,
	.probe = ice_pci_probe,
	.remove = ice_pci_remove,
};

/**
 * Driver initialization routine.
 * Invoked once at EAL init time.
 * Register itself as the [Poll Mode] Driver of PCI devices.
 */
RTE_PMD_REGISTER_PCI(net_ice, rte_ice_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_ice, pci_id_ice_map);
RTE_PMD_REGISTER_KMOD_DEP(net_ice, "* igb_uio | uio_pci_generic | vfio-pci");
RTE_PMD_REGISTER_PARAM_STRING(net_ice,
			      ICE_MAX_QP_NUM "=<int>");

RTE_INIT(ice_init_log)
{
	ice_logtype_init = rte_log_register("pmd.net.ice.init");
	if (ice_logtype_init >= 0)
		rte_log_set_level(ice_logtype_init, RTE_LOG_NOTICE);
	ice_logtype_driver = rte_log_register("pmd.net.ice.driver");
	if (ice_logtype_driver >= 0)
		rte_log_set_level(ice_logtype_driver, RTE_LOG_NOTICE);
}