1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2018 Advanced Micro Devices, Inc. All rights reserved.
3 * Copyright(c) 2018 Synopsys, Inc. All rights reserved.
6 #include "axgbe_rxtx.h"
7 #include "axgbe_ethdev.h"
8 #include "axgbe_common.h"
/* Forward declarations of the eth_dev_ops callbacks defined below. */
static int  eth_axgbe_dev_init(struct rte_eth_dev *eth_dev);
static int  eth_axgbe_dev_uninit(struct rte_eth_dev *eth_dev);
static int  axgbe_dev_configure(struct rte_eth_dev *dev);
static int  axgbe_dev_start(struct rte_eth_dev *dev);
static void axgbe_dev_stop(struct rte_eth_dev *dev);
static void axgbe_dev_interrupt_handler(void *param);
static void axgbe_dev_close(struct rte_eth_dev *dev);
static void axgbe_dev_promiscuous_enable(struct rte_eth_dev *dev);
static void axgbe_dev_promiscuous_disable(struct rte_eth_dev *dev);
static void axgbe_dev_allmulticast_enable(struct rte_eth_dev *dev);
static void axgbe_dev_allmulticast_disable(struct rte_eth_dev *dev);
static int  axgbe_dev_link_update(struct rte_eth_dev *dev,
				  int wait_to_complete);
static int  axgbe_dev_stats_get(struct rte_eth_dev *dev,
				struct rte_eth_stats *stats);
static void axgbe_dev_stats_reset(struct rte_eth_dev *dev);
static void axgbe_dev_info_get(struct rte_eth_dev *dev,
			       struct rte_eth_dev_info *dev_info);
/* The set of PCI devices this driver supports */
#define AMD_PCI_VENDOR_ID	0x1022
#define AMD_PCI_AXGBE_DEVICE_V2A 0x1458
#define AMD_PCI_AXGBE_DEVICE_V2B 0x1459

/* Dynamic log types registered in axgbe_init_log() below. */
int axgbe_logtype_init;
int axgbe_logtype_driver;
38 static const struct rte_pci_id pci_id_axgbe_map
[] = {
39 {RTE_PCI_DEVICE(AMD_PCI_VENDOR_ID
, AMD_PCI_AXGBE_DEVICE_V2A
)},
40 {RTE_PCI_DEVICE(AMD_PCI_VENDOR_ID
, AMD_PCI_AXGBE_DEVICE_V2B
)},
44 static struct axgbe_version_data axgbe_v2a
= {
45 .init_function_ptrs_phy_impl
= axgbe_init_function_ptrs_phy_v2
,
46 .xpcs_access
= AXGBE_XPCS_ACCESS_V2
,
48 .tx_max_fifo_size
= 229376,
49 .rx_max_fifo_size
= 229376,
50 .tx_tstamp_workaround
= 1,
53 .an_cdr_workaround
= 1,
56 static struct axgbe_version_data axgbe_v2b
= {
57 .init_function_ptrs_phy_impl
= axgbe_init_function_ptrs_phy_v2
,
58 .xpcs_access
= AXGBE_XPCS_ACCESS_V2
,
60 .tx_max_fifo_size
= 65536,
61 .rx_max_fifo_size
= 65536,
62 .tx_tstamp_workaround
= 1,
65 .an_cdr_workaround
= 1,
68 static const struct rte_eth_desc_lim rx_desc_lim
= {
69 .nb_max
= AXGBE_MAX_RING_DESC
,
70 .nb_min
= AXGBE_MIN_RING_DESC
,
74 static const struct rte_eth_desc_lim tx_desc_lim
= {
75 .nb_max
= AXGBE_MAX_RING_DESC
,
76 .nb_min
= AXGBE_MIN_RING_DESC
,
80 static const struct eth_dev_ops axgbe_eth_dev_ops
= {
81 .dev_configure
= axgbe_dev_configure
,
82 .dev_start
= axgbe_dev_start
,
83 .dev_stop
= axgbe_dev_stop
,
84 .dev_close
= axgbe_dev_close
,
85 .promiscuous_enable
= axgbe_dev_promiscuous_enable
,
86 .promiscuous_disable
= axgbe_dev_promiscuous_disable
,
87 .allmulticast_enable
= axgbe_dev_allmulticast_enable
,
88 .allmulticast_disable
= axgbe_dev_allmulticast_disable
,
89 .link_update
= axgbe_dev_link_update
,
90 .stats_get
= axgbe_dev_stats_get
,
91 .stats_reset
= axgbe_dev_stats_reset
,
92 .dev_infos_get
= axgbe_dev_info_get
,
93 .rx_queue_setup
= axgbe_dev_rx_queue_setup
,
94 .rx_queue_release
= axgbe_dev_rx_queue_release
,
95 .tx_queue_setup
= axgbe_dev_tx_queue_setup
,
96 .tx_queue_release
= axgbe_dev_tx_queue_release
,
99 static int axgbe_phy_reset(struct axgbe_port
*pdata
)
101 pdata
->phy_link
= -1;
102 pdata
->phy_speed
= SPEED_UNKNOWN
;
103 return pdata
->phy_if
.phy_reset(pdata
);
107 * Interrupt handler triggered by NIC for handling
108 * specific interrupt.
111 * Pointer to interrupt handle.
113 * The address of parameter (struct rte_eth_dev *) regsitered before.
119 axgbe_dev_interrupt_handler(void *param
)
121 struct rte_eth_dev
*dev
= (struct rte_eth_dev
*)param
;
122 struct axgbe_port
*pdata
= dev
->data
->dev_private
;
123 unsigned int dma_isr
, dma_ch_isr
;
125 pdata
->phy_if
.an_isr(pdata
);
126 /*DMA related interrupts*/
127 dma_isr
= AXGMAC_IOREAD(pdata
, DMA_ISR
);
131 AXGMAC_DMA_IOREAD((struct axgbe_rx_queue
*)
134 AXGMAC_DMA_IOWRITE((struct axgbe_rx_queue
*)
136 DMA_CH_SR
, dma_ch_isr
);
139 /* Enable interrupts since disabled after generation*/
140 rte_intr_enable(&pdata
->pci_dev
->intr_handle
);
144 * Configure device link speed and setup link.
145 * It returns 0 on success.
148 axgbe_dev_configure(struct rte_eth_dev
*dev
)
150 struct axgbe_port
*pdata
= dev
->data
->dev_private
;
151 /* Checksum offload to hardware */
152 pdata
->rx_csum_enable
= dev
->data
->dev_conf
.rxmode
.offloads
&
153 DEV_RX_OFFLOAD_CHECKSUM
;
158 axgbe_dev_rx_mq_config(struct rte_eth_dev
*dev
)
160 struct axgbe_port
*pdata
= (struct axgbe_port
*)dev
->data
->dev_private
;
162 if (dev
->data
->dev_conf
.rxmode
.mq_mode
== ETH_MQ_RX_RSS
)
163 pdata
->rss_enable
= 1;
164 else if (dev
->data
->dev_conf
.rxmode
.mq_mode
== ETH_MQ_RX_NONE
)
165 pdata
->rss_enable
= 0;
172 axgbe_dev_start(struct rte_eth_dev
*dev
)
174 PMD_INIT_FUNC_TRACE();
175 struct axgbe_port
*pdata
= (struct axgbe_port
*)dev
->data
->dev_private
;
179 ret
= axgbe_dev_rx_mq_config(dev
);
181 PMD_DRV_LOG(ERR
, "Unable to config RX MQ\n");
184 ret
= axgbe_phy_reset(pdata
);
186 PMD_DRV_LOG(ERR
, "phy reset failed\n");
189 ret
= pdata
->hw_if
.init(pdata
);
191 PMD_DRV_LOG(ERR
, "dev_init failed\n");
195 /* enable uio/vfio intr/eventfd mapping */
196 rte_intr_enable(&pdata
->pci_dev
->intr_handle
);
199 pdata
->phy_if
.phy_start(pdata
);
200 axgbe_dev_enable_tx(dev
);
201 axgbe_dev_enable_rx(dev
);
203 axgbe_clear_bit(AXGBE_STOPPED
, &pdata
->dev_state
);
204 axgbe_clear_bit(AXGBE_DOWN
, &pdata
->dev_state
);
208 /* Stop device: disable rx and tx functions to allow for reconfiguring. */
210 axgbe_dev_stop(struct rte_eth_dev
*dev
)
212 PMD_INIT_FUNC_TRACE();
213 struct axgbe_port
*pdata
= dev
->data
->dev_private
;
215 rte_intr_disable(&pdata
->pci_dev
->intr_handle
);
217 if (axgbe_test_bit(AXGBE_STOPPED
, &pdata
->dev_state
))
220 axgbe_set_bit(AXGBE_STOPPED
, &pdata
->dev_state
);
221 axgbe_dev_disable_tx(dev
);
222 axgbe_dev_disable_rx(dev
);
224 pdata
->phy_if
.phy_stop(pdata
);
225 pdata
->hw_if
.exit(pdata
);
226 memset(&dev
->data
->dev_link
, 0, sizeof(struct rte_eth_link
));
227 axgbe_set_bit(AXGBE_DOWN
, &pdata
->dev_state
);
/* Clear all resources like TX/RX queues. */
static void
axgbe_dev_close(struct rte_eth_dev *dev)
{
	axgbe_dev_clear_queues(dev);
}
238 axgbe_dev_promiscuous_enable(struct rte_eth_dev
*dev
)
240 PMD_INIT_FUNC_TRACE();
241 struct axgbe_port
*pdata
= dev
->data
->dev_private
;
243 AXGMAC_IOWRITE_BITS(pdata
, MAC_PFR
, PR
, 1);
247 axgbe_dev_promiscuous_disable(struct rte_eth_dev
*dev
)
249 PMD_INIT_FUNC_TRACE();
250 struct axgbe_port
*pdata
= dev
->data
->dev_private
;
252 AXGMAC_IOWRITE_BITS(pdata
, MAC_PFR
, PR
, 0);
256 axgbe_dev_allmulticast_enable(struct rte_eth_dev
*dev
)
258 PMD_INIT_FUNC_TRACE();
259 struct axgbe_port
*pdata
= dev
->data
->dev_private
;
261 if (AXGMAC_IOREAD_BITS(pdata
, MAC_PFR
, PM
))
263 AXGMAC_IOWRITE_BITS(pdata
, MAC_PFR
, PM
, 1);
267 axgbe_dev_allmulticast_disable(struct rte_eth_dev
*dev
)
269 PMD_INIT_FUNC_TRACE();
270 struct axgbe_port
*pdata
= dev
->data
->dev_private
;
272 if (!AXGMAC_IOREAD_BITS(pdata
, MAC_PFR
, PM
))
274 AXGMAC_IOWRITE_BITS(pdata
, MAC_PFR
, PM
, 0);
277 /* return 0 means link status changed, -1 means not changed */
279 axgbe_dev_link_update(struct rte_eth_dev
*dev
,
280 int wait_to_complete __rte_unused
)
282 struct axgbe_port
*pdata
= dev
->data
->dev_private
;
283 struct rte_eth_link link
;
286 PMD_INIT_FUNC_TRACE();
289 pdata
->phy_if
.phy_status(pdata
);
291 memset(&link
, 0, sizeof(struct rte_eth_link
));
292 link
.link_duplex
= pdata
->phy
.duplex
;
293 link
.link_status
= pdata
->phy_link
;
294 link
.link_speed
= pdata
->phy_speed
;
295 link
.link_autoneg
= !(dev
->data
->dev_conf
.link_speeds
&
296 ETH_LINK_SPEED_FIXED
);
297 ret
= rte_eth_linkstatus_set(dev
, &link
);
299 PMD_DRV_LOG(ERR
, "No change in link status\n");
305 axgbe_dev_stats_get(struct rte_eth_dev
*dev
,
306 struct rte_eth_stats
*stats
)
308 struct axgbe_rx_queue
*rxq
;
309 struct axgbe_tx_queue
*txq
;
312 for (i
= 0; i
< dev
->data
->nb_rx_queues
; i
++) {
313 rxq
= dev
->data
->rx_queues
[i
];
314 stats
->q_ipackets
[i
] = rxq
->pkts
;
315 stats
->ipackets
+= rxq
->pkts
;
316 stats
->q_ibytes
[i
] = rxq
->bytes
;
317 stats
->ibytes
+= rxq
->bytes
;
319 for (i
= 0; i
< dev
->data
->nb_tx_queues
; i
++) {
320 txq
= dev
->data
->tx_queues
[i
];
321 stats
->q_opackets
[i
] = txq
->pkts
;
322 stats
->opackets
+= txq
->pkts
;
323 stats
->q_obytes
[i
] = txq
->bytes
;
324 stats
->obytes
+= txq
->bytes
;
331 axgbe_dev_stats_reset(struct rte_eth_dev
*dev
)
333 struct axgbe_rx_queue
*rxq
;
334 struct axgbe_tx_queue
*txq
;
337 for (i
= 0; i
< dev
->data
->nb_rx_queues
; i
++) {
338 rxq
= dev
->data
->rx_queues
[i
];
343 for (i
= 0; i
< dev
->data
->nb_tx_queues
; i
++) {
344 txq
= dev
->data
->tx_queues
[i
];
352 axgbe_dev_info_get(struct rte_eth_dev
*dev
, struct rte_eth_dev_info
*dev_info
)
354 struct axgbe_port
*pdata
= dev
->data
->dev_private
;
356 dev_info
->max_rx_queues
= pdata
->rx_ring_count
;
357 dev_info
->max_tx_queues
= pdata
->tx_ring_count
;
358 dev_info
->min_rx_bufsize
= AXGBE_RX_MIN_BUF_SIZE
;
359 dev_info
->max_rx_pktlen
= AXGBE_RX_MAX_BUF_SIZE
;
360 dev_info
->max_mac_addrs
= AXGBE_MAX_MAC_ADDRS
;
361 dev_info
->speed_capa
= ETH_LINK_SPEED_10G
;
363 dev_info
->rx_offload_capa
=
364 DEV_RX_OFFLOAD_IPV4_CKSUM
|
365 DEV_RX_OFFLOAD_UDP_CKSUM
|
366 DEV_RX_OFFLOAD_TCP_CKSUM
|
367 DEV_RX_OFFLOAD_CRC_STRIP
|
368 DEV_RX_OFFLOAD_KEEP_CRC
;
370 dev_info
->tx_offload_capa
=
371 DEV_TX_OFFLOAD_IPV4_CKSUM
|
372 DEV_TX_OFFLOAD_UDP_CKSUM
|
373 DEV_TX_OFFLOAD_TCP_CKSUM
;
375 if (pdata
->hw_feat
.rss
) {
376 dev_info
->flow_type_rss_offloads
= AXGBE_RSS_OFFLOAD
;
377 dev_info
->reta_size
= pdata
->hw_feat
.hash_table_size
;
378 dev_info
->hash_key_size
= AXGBE_RSS_HASH_KEY_SIZE
;
381 dev_info
->rx_desc_lim
= rx_desc_lim
;
382 dev_info
->tx_desc_lim
= tx_desc_lim
;
384 dev_info
->default_rxconf
= (struct rte_eth_rxconf
) {
385 .rx_free_thresh
= AXGBE_RX_FREE_THRESH
,
388 dev_info
->default_txconf
= (struct rte_eth_txconf
) {
389 .tx_free_thresh
= AXGBE_TX_FREE_THRESH
,
393 static void axgbe_get_all_hw_features(struct axgbe_port
*pdata
)
395 unsigned int mac_hfr0
, mac_hfr1
, mac_hfr2
;
396 struct axgbe_hw_features
*hw_feat
= &pdata
->hw_feat
;
398 mac_hfr0
= AXGMAC_IOREAD(pdata
, MAC_HWF0R
);
399 mac_hfr1
= AXGMAC_IOREAD(pdata
, MAC_HWF1R
);
400 mac_hfr2
= AXGMAC_IOREAD(pdata
, MAC_HWF2R
);
402 memset(hw_feat
, 0, sizeof(*hw_feat
));
404 hw_feat
->version
= AXGMAC_IOREAD(pdata
, MAC_VR
);
406 /* Hardware feature register 0 */
407 hw_feat
->gmii
= AXGMAC_GET_BITS(mac_hfr0
, MAC_HWF0R
, GMIISEL
);
408 hw_feat
->vlhash
= AXGMAC_GET_BITS(mac_hfr0
, MAC_HWF0R
, VLHASH
);
409 hw_feat
->sma
= AXGMAC_GET_BITS(mac_hfr0
, MAC_HWF0R
, SMASEL
);
410 hw_feat
->rwk
= AXGMAC_GET_BITS(mac_hfr0
, MAC_HWF0R
, RWKSEL
);
411 hw_feat
->mgk
= AXGMAC_GET_BITS(mac_hfr0
, MAC_HWF0R
, MGKSEL
);
412 hw_feat
->mmc
= AXGMAC_GET_BITS(mac_hfr0
, MAC_HWF0R
, MMCSEL
);
413 hw_feat
->aoe
= AXGMAC_GET_BITS(mac_hfr0
, MAC_HWF0R
, ARPOFFSEL
);
414 hw_feat
->ts
= AXGMAC_GET_BITS(mac_hfr0
, MAC_HWF0R
, TSSEL
);
415 hw_feat
->eee
= AXGMAC_GET_BITS(mac_hfr0
, MAC_HWF0R
, EEESEL
);
416 hw_feat
->tx_coe
= AXGMAC_GET_BITS(mac_hfr0
, MAC_HWF0R
, TXCOESEL
);
417 hw_feat
->rx_coe
= AXGMAC_GET_BITS(mac_hfr0
, MAC_HWF0R
, RXCOESEL
);
418 hw_feat
->addn_mac
= AXGMAC_GET_BITS(mac_hfr0
, MAC_HWF0R
,
420 hw_feat
->ts_src
= AXGMAC_GET_BITS(mac_hfr0
, MAC_HWF0R
, TSSTSSEL
);
421 hw_feat
->sa_vlan_ins
= AXGMAC_GET_BITS(mac_hfr0
, MAC_HWF0R
, SAVLANINS
);
423 /* Hardware feature register 1 */
424 hw_feat
->rx_fifo_size
= AXGMAC_GET_BITS(mac_hfr1
, MAC_HWF1R
,
426 hw_feat
->tx_fifo_size
= AXGMAC_GET_BITS(mac_hfr1
, MAC_HWF1R
,
428 hw_feat
->adv_ts_hi
= AXGMAC_GET_BITS(mac_hfr1
,
429 MAC_HWF1R
, ADVTHWORD
);
430 hw_feat
->dma_width
= AXGMAC_GET_BITS(mac_hfr1
, MAC_HWF1R
, ADDR64
);
431 hw_feat
->dcb
= AXGMAC_GET_BITS(mac_hfr1
, MAC_HWF1R
, DCBEN
);
432 hw_feat
->sph
= AXGMAC_GET_BITS(mac_hfr1
, MAC_HWF1R
, SPHEN
);
433 hw_feat
->tso
= AXGMAC_GET_BITS(mac_hfr1
, MAC_HWF1R
, TSOEN
);
434 hw_feat
->dma_debug
= AXGMAC_GET_BITS(mac_hfr1
, MAC_HWF1R
, DBGMEMA
);
435 hw_feat
->rss
= AXGMAC_GET_BITS(mac_hfr1
, MAC_HWF1R
, RSSEN
);
436 hw_feat
->tc_cnt
= AXGMAC_GET_BITS(mac_hfr1
, MAC_HWF1R
, NUMTC
);
437 hw_feat
->hash_table_size
= AXGMAC_GET_BITS(mac_hfr1
, MAC_HWF1R
,
439 hw_feat
->l3l4_filter_num
= AXGMAC_GET_BITS(mac_hfr1
, MAC_HWF1R
,
442 /* Hardware feature register 2 */
443 hw_feat
->rx_q_cnt
= AXGMAC_GET_BITS(mac_hfr2
, MAC_HWF2R
, RXQCNT
);
444 hw_feat
->tx_q_cnt
= AXGMAC_GET_BITS(mac_hfr2
, MAC_HWF2R
, TXQCNT
);
445 hw_feat
->rx_ch_cnt
= AXGMAC_GET_BITS(mac_hfr2
, MAC_HWF2R
, RXCHCNT
);
446 hw_feat
->tx_ch_cnt
= AXGMAC_GET_BITS(mac_hfr2
, MAC_HWF2R
, TXCHCNT
);
447 hw_feat
->pps_out_num
= AXGMAC_GET_BITS(mac_hfr2
, MAC_HWF2R
, PPSOUTNUM
);
448 hw_feat
->aux_snap_num
= AXGMAC_GET_BITS(mac_hfr2
, MAC_HWF2R
,
451 /* Translate the Hash Table size into actual number */
452 switch (hw_feat
->hash_table_size
) {
456 hw_feat
->hash_table_size
= 64;
459 hw_feat
->hash_table_size
= 128;
462 hw_feat
->hash_table_size
= 256;
466 /* Translate the address width setting into actual number */
467 switch (hw_feat
->dma_width
) {
469 hw_feat
->dma_width
= 32;
472 hw_feat
->dma_width
= 40;
475 hw_feat
->dma_width
= 48;
478 hw_feat
->dma_width
= 32;
481 /* The Queue, Channel and TC counts are zero based so increment them
482 * to get the actual number
486 hw_feat
->rx_ch_cnt
++;
487 hw_feat
->tx_ch_cnt
++;
490 /* Translate the fifo sizes into actual numbers */
491 hw_feat
->rx_fifo_size
= 1 << (hw_feat
->rx_fifo_size
+ 7);
492 hw_feat
->tx_fifo_size
= 1 << (hw_feat
->tx_fifo_size
+ 7);
495 static void axgbe_init_all_fptrs(struct axgbe_port
*pdata
)
497 axgbe_init_function_ptrs_dev(&pdata
->hw_if
);
498 axgbe_init_function_ptrs_phy(&pdata
->phy_if
);
499 axgbe_init_function_ptrs_i2c(&pdata
->i2c_if
);
500 pdata
->vdata
->init_function_ptrs_phy_impl(&pdata
->phy_if
);
503 static void axgbe_set_counts(struct axgbe_port
*pdata
)
505 /* Set all the function pointers */
506 axgbe_init_all_fptrs(pdata
);
508 /* Populate the hardware features */
509 axgbe_get_all_hw_features(pdata
);
511 /* Set default max values if not provided */
512 if (!pdata
->tx_max_channel_count
)
513 pdata
->tx_max_channel_count
= pdata
->hw_feat
.tx_ch_cnt
;
514 if (!pdata
->rx_max_channel_count
)
515 pdata
->rx_max_channel_count
= pdata
->hw_feat
.rx_ch_cnt
;
517 if (!pdata
->tx_max_q_count
)
518 pdata
->tx_max_q_count
= pdata
->hw_feat
.tx_q_cnt
;
519 if (!pdata
->rx_max_q_count
)
520 pdata
->rx_max_q_count
= pdata
->hw_feat
.rx_q_cnt
;
522 /* Calculate the number of Tx and Rx rings to be created
523 * -Tx (DMA) Channels map 1-to-1 to Tx Queues so set
524 * the number of Tx queues to the number of Tx channels
526 * -Rx (DMA) Channels do not map 1-to-1 so use the actual
527 * number of Rx queues or maximum allowed
529 pdata
->tx_ring_count
= RTE_MIN(pdata
->hw_feat
.tx_ch_cnt
,
530 pdata
->tx_max_channel_count
);
531 pdata
->tx_ring_count
= RTE_MIN(pdata
->tx_ring_count
,
532 pdata
->tx_max_q_count
);
534 pdata
->tx_q_count
= pdata
->tx_ring_count
;
536 pdata
->rx_ring_count
= RTE_MIN(pdata
->hw_feat
.rx_ch_cnt
,
537 pdata
->rx_max_channel_count
);
539 pdata
->rx_q_count
= RTE_MIN(pdata
->hw_feat
.rx_q_cnt
,
540 pdata
->rx_max_q_count
);
543 static void axgbe_default_config(struct axgbe_port
*pdata
)
545 pdata
->pblx8
= DMA_PBL_X8_ENABLE
;
546 pdata
->tx_sf_mode
= MTL_TSF_ENABLE
;
547 pdata
->tx_threshold
= MTL_TX_THRESHOLD_64
;
548 pdata
->tx_pbl
= DMA_PBL_32
;
549 pdata
->tx_osp_mode
= DMA_OSP_ENABLE
;
550 pdata
->rx_sf_mode
= MTL_RSF_ENABLE
;
551 pdata
->rx_threshold
= MTL_RX_THRESHOLD_64
;
552 pdata
->rx_pbl
= DMA_PBL_32
;
553 pdata
->pause_autoneg
= 1;
556 pdata
->phy_speed
= SPEED_UNKNOWN
;
557 pdata
->power_down
= 0;
561 * It returns 0 on success.
564 eth_axgbe_dev_init(struct rte_eth_dev
*eth_dev
)
566 PMD_INIT_FUNC_TRACE();
567 struct axgbe_port
*pdata
;
568 struct rte_pci_device
*pci_dev
;
569 uint32_t reg
, mac_lo
, mac_hi
;
572 eth_dev
->dev_ops
= &axgbe_eth_dev_ops
;
573 eth_dev
->rx_pkt_burst
= &axgbe_recv_pkts
;
576 * For secondary processes, we don't initialise any further as primary
577 * has already done this work.
579 if (rte_eal_process_type() != RTE_PROC_PRIMARY
)
582 pdata
= (struct axgbe_port
*)eth_dev
->data
->dev_private
;
584 axgbe_set_bit(AXGBE_DOWN
, &pdata
->dev_state
);
585 axgbe_set_bit(AXGBE_STOPPED
, &pdata
->dev_state
);
586 pdata
->eth_dev
= eth_dev
;
588 pci_dev
= RTE_DEV_TO_PCI(eth_dev
->device
);
589 pdata
->pci_dev
= pci_dev
;
592 (void *)pci_dev
->mem_resource
[AXGBE_AXGMAC_BAR
].addr
;
593 pdata
->xprop_regs
= (void *)((uint8_t *)pdata
->xgmac_regs
594 + AXGBE_MAC_PROP_OFFSET
);
595 pdata
->xi2c_regs
= (void *)((uint8_t *)pdata
->xgmac_regs
596 + AXGBE_I2C_CTRL_OFFSET
);
597 pdata
->xpcs_regs
= (void *)pci_dev
->mem_resource
[AXGBE_XPCS_BAR
].addr
;
599 /* version specific driver data*/
600 if (pci_dev
->id
.device_id
== AMD_PCI_AXGBE_DEVICE_V2A
)
601 pdata
->vdata
= &axgbe_v2a
;
603 pdata
->vdata
= &axgbe_v2b
;
605 /* Configure the PCS indirect addressing support */
606 reg
= XPCS32_IOREAD(pdata
, PCS_V2_WINDOW_DEF
);
607 pdata
->xpcs_window
= XPCS_GET_BITS(reg
, PCS_V2_WINDOW_DEF
, OFFSET
);
608 pdata
->xpcs_window
<<= 6;
609 pdata
->xpcs_window_size
= XPCS_GET_BITS(reg
, PCS_V2_WINDOW_DEF
, SIZE
);
610 pdata
->xpcs_window_size
= 1 << (pdata
->xpcs_window_size
+ 7);
611 pdata
->xpcs_window_mask
= pdata
->xpcs_window_size
- 1;
612 pdata
->xpcs_window_def_reg
= PCS_V2_WINDOW_DEF
;
613 pdata
->xpcs_window_sel_reg
= PCS_V2_WINDOW_SELECT
;
615 "xpcs window :%x, size :%x, mask :%x ", pdata
->xpcs_window
,
616 pdata
->xpcs_window_size
, pdata
->xpcs_window_mask
);
617 XP_IOWRITE(pdata
, XP_INT_EN
, 0x1fffff);
619 /* Retrieve the MAC address */
620 mac_lo
= XP_IOREAD(pdata
, XP_MAC_ADDR_LO
);
621 mac_hi
= XP_IOREAD(pdata
, XP_MAC_ADDR_HI
);
622 pdata
->mac_addr
.addr_bytes
[0] = mac_lo
& 0xff;
623 pdata
->mac_addr
.addr_bytes
[1] = (mac_lo
>> 8) & 0xff;
624 pdata
->mac_addr
.addr_bytes
[2] = (mac_lo
>> 16) & 0xff;
625 pdata
->mac_addr
.addr_bytes
[3] = (mac_lo
>> 24) & 0xff;
626 pdata
->mac_addr
.addr_bytes
[4] = mac_hi
& 0xff;
627 pdata
->mac_addr
.addr_bytes
[5] = (mac_hi
>> 8) & 0xff;
629 eth_dev
->data
->mac_addrs
= rte_zmalloc("axgbe_mac_addr",
631 if (!eth_dev
->data
->mac_addrs
) {
633 "Failed to alloc %u bytes needed to store MAC addr tbl",
638 if (!is_valid_assigned_ether_addr(&pdata
->mac_addr
))
639 eth_random_addr(pdata
->mac_addr
.addr_bytes
);
641 /* Copy the permanent MAC address */
642 ether_addr_copy(&pdata
->mac_addr
, ð_dev
->data
->mac_addrs
[0]);
645 pdata
->sysclk_rate
= AXGBE_V2_DMA_CLOCK_FREQ
;
646 pdata
->ptpclk_rate
= AXGBE_V2_PTP_CLOCK_FREQ
;
648 /* Set the DMA coherency values */
650 pdata
->axdomain
= AXGBE_DMA_OS_AXDOMAIN
;
651 pdata
->arcache
= AXGBE_DMA_OS_ARCACHE
;
652 pdata
->awcache
= AXGBE_DMA_OS_AWCACHE
;
654 /* Set the maximum channels and queues */
655 reg
= XP_IOREAD(pdata
, XP_PROP_1
);
656 pdata
->tx_max_channel_count
= XP_GET_BITS(reg
, XP_PROP_1
, MAX_TX_DMA
);
657 pdata
->rx_max_channel_count
= XP_GET_BITS(reg
, XP_PROP_1
, MAX_RX_DMA
);
658 pdata
->tx_max_q_count
= XP_GET_BITS(reg
, XP_PROP_1
, MAX_TX_QUEUES
);
659 pdata
->rx_max_q_count
= XP_GET_BITS(reg
, XP_PROP_1
, MAX_RX_QUEUES
);
661 /* Set the hardware channel and queue counts */
662 axgbe_set_counts(pdata
);
664 /* Set the maximum fifo amounts */
665 reg
= XP_IOREAD(pdata
, XP_PROP_2
);
666 pdata
->tx_max_fifo_size
= XP_GET_BITS(reg
, XP_PROP_2
, TX_FIFO_SIZE
);
667 pdata
->tx_max_fifo_size
*= 16384;
668 pdata
->tx_max_fifo_size
= RTE_MIN(pdata
->tx_max_fifo_size
,
669 pdata
->vdata
->tx_max_fifo_size
);
670 pdata
->rx_max_fifo_size
= XP_GET_BITS(reg
, XP_PROP_2
, RX_FIFO_SIZE
);
671 pdata
->rx_max_fifo_size
*= 16384;
672 pdata
->rx_max_fifo_size
= RTE_MIN(pdata
->rx_max_fifo_size
,
673 pdata
->vdata
->rx_max_fifo_size
);
674 /* Issue software reset to DMA */
675 ret
= pdata
->hw_if
.exit(pdata
);
677 PMD_DRV_LOG(ERR
, "hw_if->exit EBUSY error\n");
679 /* Set default configuration data */
680 axgbe_default_config(pdata
);
682 /* Set default max values if not provided */
683 if (!pdata
->tx_max_fifo_size
)
684 pdata
->tx_max_fifo_size
= pdata
->hw_feat
.tx_fifo_size
;
685 if (!pdata
->rx_max_fifo_size
)
686 pdata
->rx_max_fifo_size
= pdata
->hw_feat
.rx_fifo_size
;
688 pdata
->tx_desc_count
= AXGBE_MAX_RING_DESC
;
689 pdata
->rx_desc_count
= AXGBE_MAX_RING_DESC
;
690 pthread_mutex_init(&pdata
->xpcs_mutex
, NULL
);
691 pthread_mutex_init(&pdata
->i2c_mutex
, NULL
);
692 pthread_mutex_init(&pdata
->an_mutex
, NULL
);
693 pthread_mutex_init(&pdata
->phy_mutex
, NULL
);
695 ret
= pdata
->phy_if
.phy_init(pdata
);
697 rte_free(eth_dev
->data
->mac_addrs
);
701 rte_intr_callback_register(&pci_dev
->intr_handle
,
702 axgbe_dev_interrupt_handler
,
704 PMD_INIT_LOG(DEBUG
, "port %d vendorID=0x%x deviceID=0x%x",
705 eth_dev
->data
->port_id
, pci_dev
->id
.vendor_id
,
706 pci_dev
->id
.device_id
);
712 eth_axgbe_dev_uninit(struct rte_eth_dev
*eth_dev
)
714 struct rte_pci_device
*pci_dev
;
716 PMD_INIT_FUNC_TRACE();
718 if (rte_eal_process_type() != RTE_PROC_PRIMARY
)
721 pci_dev
= RTE_DEV_TO_PCI(eth_dev
->device
);
723 rte_free(eth_dev
->data
->mac_addrs
);
724 eth_dev
->data
->mac_addrs
= NULL
;
725 eth_dev
->dev_ops
= NULL
;
726 eth_dev
->rx_pkt_burst
= NULL
;
727 eth_dev
->tx_pkt_burst
= NULL
;
728 axgbe_dev_clear_queues(eth_dev
);
730 /* disable uio intr before callback unregister */
731 rte_intr_disable(&pci_dev
->intr_handle
);
732 rte_intr_callback_unregister(&pci_dev
->intr_handle
,
733 axgbe_dev_interrupt_handler
,
739 static int eth_axgbe_pci_probe(struct rte_pci_driver
*pci_drv __rte_unused
,
740 struct rte_pci_device
*pci_dev
)
742 return rte_eth_dev_pci_generic_probe(pci_dev
,
743 sizeof(struct axgbe_port
), eth_axgbe_dev_init
);
746 static int eth_axgbe_pci_remove(struct rte_pci_device
*pci_dev
)
748 return rte_eth_dev_pci_generic_remove(pci_dev
, eth_axgbe_dev_uninit
);
751 static struct rte_pci_driver rte_axgbe_pmd
= {
752 .id_table
= pci_id_axgbe_map
,
753 .drv_flags
= RTE_PCI_DRV_NEED_MAPPING
,
754 .probe
= eth_axgbe_pci_probe
,
755 .remove
= eth_axgbe_pci_remove
,
758 RTE_PMD_REGISTER_PCI(net_axgbe
, rte_axgbe_pmd
);
759 RTE_PMD_REGISTER_PCI_TABLE(net_axgbe
, pci_id_axgbe_map
);
760 RTE_PMD_REGISTER_KMOD_DEP(net_axgbe
, "* igb_uio | uio_pci_generic | vfio-pci");
762 RTE_INIT(axgbe_init_log
)
764 axgbe_logtype_init
= rte_log_register("pmd.net.axgbe.init");
765 if (axgbe_logtype_init
>= 0)
766 rte_log_set_level(axgbe_logtype_init
, RTE_LOG_NOTICE
);
767 axgbe_logtype_driver
= rte_log_register("pmd.net.axgbe.driver");
768 if (axgbe_logtype_driver
>= 0)
769 rte_log_set_level(axgbe_logtype_driver
, RTE_LOG_NOTICE
);