1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2017 Cavium, Inc
12 #include <rte_alarm.h>
13 #include <rte_branch_prediction.h>
14 #include <rte_debug.h>
15 #include <rte_devargs.h>
17 #include <rte_kvargs.h>
18 #include <rte_malloc.h>
19 #include <rte_mbuf_pool_ops.h>
20 #include <rte_prefetch.h>
21 #include <rte_bus_vdev.h>
23 #include "octeontx_ethdev.h"
24 #include "octeontx_rxtx.h"
25 #include "octeontx_logs.h"
/* Parameters collected from the vdev devargs string ("nr_port=...").
 * NOTE(review): this extract is garbled — the struct body is elided;
 * verify member list against upstream octeontx_ethdev.c.
 */
27 struct octeontx_vdev_init_params
{
/* Map of (BGX port, LMAC) -> ethdev port id, filled in octeontx_create()
 * from the base output channel bits. Element type is elided here —
 * presumably uint16_t; TODO confirm against upstream.
 */
32 rte_octeontx_pchan_map
[OCTEONTX_MAX_BGX_PORTS
][OCTEONTX_MAX_LMAC_PER_BGX
];
/* Link-speed/mode values reported by the BGX mailbox (bgx_port_conf.mode);
 * consumed by the switch in octeontx_dev_link_update().
 */
34 enum octeontx_link_speed
{
35 OCTEONTX_LINK_SPEED_SGMII
,
36 OCTEONTX_LINK_SPEED_XAUI
,
37 OCTEONTX_LINK_SPEED_RXAUI
,
38 OCTEONTX_LINK_SPEED_10G_R
,
39 OCTEONTX_LINK_SPEED_40G_R
,
40 OCTEONTX_LINK_SPEED_RESERVE1
,
41 OCTEONTX_LINK_SPEED_QSGMII
,
42 OCTEONTX_LINK_SPEED_RESERVE2
/* Dynamic log types for the mbox/init/driver message classes,
 * registered below in the RTE_INIT constructor.
 */
45 int otx_net_logtype_mbox
;
46 int otx_net_logtype_init
;
47 int otx_net_logtype_driver
;
/* Constructor: register the three PMD log types and default each that
 * registers successfully (id >= 0) to NOTICE level.
 */
49 RTE_INIT(otx_net_init_log
)
51 otx_net_logtype_mbox
= rte_log_register("pmd.net.octeontx.mbox");
52 if (otx_net_logtype_mbox
>= 0)
53 rte_log_set_level(otx_net_logtype_mbox
, RTE_LOG_NOTICE
);
55 otx_net_logtype_init
= rte_log_register("pmd.net.octeontx.init");
56 if (otx_net_logtype_init
>= 0)
57 rte_log_set_level(otx_net_logtype_init
, RTE_LOG_NOTICE
);
59 otx_net_logtype_driver
= rte_log_register("pmd.net.octeontx.driver");
60 if (otx_net_logtype_driver
>= 0)
61 rte_log_set_level(otx_net_logtype_driver
, RTE_LOG_NOTICE
);
64 /* Parse integer from integer argument */
/* rte_kvargs_process() callback: converts @value to int into *extra_args.
 * NOTE(review): the conversion call and the negative-value check are elided
 * in this extract (only the error log survives); verify against upstream.
 */
66 parse_integer_arg(const char *key __rte_unused
,
67 const char *value
, void *extra_args
)
69 int *i
= (int *)extra_args
;
73 octeontx_log_err("argument has to be positive.");
/* Parse the vdev devargs of @dev into @params via rte_kvargs.
 * Only OCTEONTX_VDEV_NR_PORT_ARG is accepted; parse_integer_arg() is
 * (presumably — call site elided) the per-key processor. The kvlist is
 * always freed before return.
 */
81 octeontx_parse_vdev_init_params(struct octeontx_vdev_init_params
*params
,
82 struct rte_vdev_device
*dev
)
84 struct rte_kvargs
*kvlist
= NULL
;
87 static const char * const octeontx_vdev_valid_params
[] = {
88 OCTEONTX_VDEV_NR_PORT_ARG
,
92 const char *input_args
= rte_vdev_device_args(dev
);
98 kvlist
= rte_kvargs_parse(input_args
,
99 octeontx_vdev_valid_params
);
103 ret
= rte_kvargs_process(kvlist
,
104 OCTEONTX_VDEV_NR_PORT_ARG
,
112 rte_kvargs_free(kvlist
);
/* Open the BGX port backing @nic and cache its configuration (channels,
 * MTU, MAC address, modes) into the nic structure. Error-branch bodies
 * and return statements are elided in this extract.
 */
117 octeontx_port_open(struct octeontx_nic
*nic
)
119 octeontx_mbox_bgx_port_conf_t bgx_port_conf
;
123 memset(&bgx_port_conf
, 0x0, sizeof(bgx_port_conf
));
124 PMD_INIT_FUNC_TRACE();
126 res
= octeontx_bgx_port_open(nic
->port_id
, &bgx_port_conf
);
128 octeontx_log_err("failed to open port %d", res
);
/* Mirror the mailbox-reported configuration into the nic.
 * Note: rx and tx use the same base channel / channel count.
 */
132 nic
->node
= bgx_port_conf
.node
;
133 nic
->port_ena
= bgx_port_conf
.enable
;
134 nic
->base_ichan
= bgx_port_conf
.base_chan
;
135 nic
->base_ochan
= bgx_port_conf
.base_chan
;
136 nic
->num_ichans
= bgx_port_conf
.num_chans
;
137 nic
->num_ochans
= bgx_port_conf
.num_chans
;
138 nic
->mtu
= bgx_port_conf
.mtu
;
139 nic
->bpen
= bgx_port_conf
.bpen
;
140 nic
->fcs_strip
= bgx_port_conf
.fcs_strip
;
141 nic
->bcast_mode
= bgx_port_conf
.bcast_mode
;
142 nic
->mcast_mode
= bgx_port_conf
.mcast_mode
;
143 nic
->speed
= bgx_port_conf
.mode
;
145 memcpy(&nic
->mac_addr
[0], &bgx_port_conf
.macaddr
[0], ETHER_ADDR_LEN
);
147 octeontx_log_dbg("port opened %d", nic
->port_id
);
/* Thin wrappers over the BGX mailbox port lifecycle calls. */

/* Close the BGX port backing @nic (fire-and-forget; result not checked). */
152 octeontx_port_close(struct octeontx_nic
*nic
)
154 PMD_INIT_FUNC_TRACE();
156 octeontx_bgx_port_close(nic
->port_id
);
157 octeontx_log_dbg("port closed %d", nic
->port_id
);
/* Start traffic on the BGX port; returns the mailbox call's result. */
161 octeontx_port_start(struct octeontx_nic
*nic
)
163 PMD_INIT_FUNC_TRACE();
165 return octeontx_bgx_port_start(nic
->port_id
);
/* Stop traffic on the BGX port; returns the mailbox call's result. */
169 octeontx_port_stop(struct octeontx_nic
*nic
)
171 PMD_INIT_FUNC_TRACE();
173 return octeontx_bgx_port_stop(nic
->port_id
);
/* Enable (@en != 0) or disable promiscuous mode on the BGX port and
 * mirror the resulting state into dev->data->promiscuous.
 * NOTE(review): the assignment of the local @dev from the nic is elided
 * in this extract — verify against upstream.
 */
177 octeontx_port_promisc_set(struct octeontx_nic
*nic
, int en
)
179 struct rte_eth_dev
*dev
;
183 PMD_INIT_FUNC_TRACE();
186 res
= octeontx_bgx_port_promisc_set(nic
->port_id
, en
);
188 octeontx_log_err("failed to set promiscuous mode %d",
191 /* Set proper flag for the mode */
192 dev
->data
->promiscuous
= (en
!= 0) ? 1 : 0;
194 octeontx_log_dbg("port %d : promiscuous mode %s",
195 nic
->port_id
, en
? "set" : "unset");
/* Fetch BGX port counters via the mailbox and translate them into the
 * generic rte_eth_stats layout. Error branch body is elided here.
 */
199 octeontx_port_stats(struct octeontx_nic
*nic
, struct rte_eth_stats
*stats
)
201 octeontx_mbox_bgx_port_stats_t bgx_stats
;
204 PMD_INIT_FUNC_TRACE();
206 res
= octeontx_bgx_port_stats(nic
->port_id
, &bgx_stats
);
208 octeontx_log_err("failed to get port stats %d", nic
->port_id
);
/* BGX counters -> ethdev stats, field by field. */
212 stats
->ipackets
= bgx_stats
.rx_packets
;
213 stats
->ibytes
= bgx_stats
.rx_bytes
;
214 stats
->imissed
= bgx_stats
.rx_dropped
;
215 stats
->ierrors
= bgx_stats
.rx_errors
;
216 stats
->opackets
= bgx_stats
.tx_packets
;
217 stats
->obytes
= bgx_stats
.tx_bytes
;
218 stats
->oerrors
= bgx_stats
.tx_errors
;
220 octeontx_log_dbg("port%d stats inpkts=%" PRIx64
" outpkts=%" PRIx64
"",
221 nic
->port_id
, stats
->ipackets
, stats
->opackets
);
/* Reset the BGX hardware counters for this port (result not checked). */
227 octeontx_port_stats_clr(struct octeontx_nic
*nic
)
229 PMD_INIT_FUNC_TRACE();
231 octeontx_bgx_port_stats_clr(nic
->port_id
);
235 devconf_set_default_sane_values(struct rte_event_dev_config
*dev_conf
,
236 struct rte_event_dev_info
*info
)
238 memset(dev_conf
, 0, sizeof(struct rte_event_dev_config
));
239 dev_conf
->dequeue_timeout_ns
= info
->min_dequeue_timeout_ns
;
241 dev_conf
->nb_event_ports
= info
->max_event_ports
;
242 dev_conf
->nb_event_queues
= info
->max_event_queues
;
244 dev_conf
->nb_event_queue_flows
= info
->max_event_queue_flows
;
245 dev_conf
->nb_event_port_dequeue_depth
=
246 info
->max_event_port_dequeue_depth
;
247 dev_conf
->nb_event_port_enqueue_depth
=
248 info
->max_event_port_enqueue_depth
;
249 dev_conf
->nb_event_port_enqueue_depth
=
250 info
->max_event_port_enqueue_depth
;
251 dev_conf
->nb_events_limit
=
252 info
->max_num_events
;
/* eth_dev_ops.dev_configure: validate the requested configuration
 * (reject unsupported tx/rx mq modes, fixed link speed, DCB, flow
 * director; force MT_LOCKFREE tx offload), then open the PKO channel
 * for this port and seed the PKI sub-config flags. Return statements
 * and some branch bodies are elided in this extract.
 */
256 octeontx_dev_configure(struct rte_eth_dev
*dev
)
258 struct rte_eth_dev_data
*data
= dev
->data
;
259 struct rte_eth_conf
*conf
= &data
->dev_conf
;
260 struct rte_eth_rxmode
*rxmode
= &conf
->rxmode
;
261 struct rte_eth_txmode
*txmode
= &conf
->txmode
;
262 struct octeontx_nic
*nic
= octeontx_pmd_priv(dev
);
265 PMD_INIT_FUNC_TRACE();
/* Hugepages are mandatory for the HW pools this PMD uses. */
268 if (!rte_eal_has_hugepages()) {
269 octeontx_log_err("huge page is not configured");
273 if (txmode
->mq_mode
) {
274 octeontx_log_err("tx mq_mode DCB or VMDq not supported");
278 if (rxmode
->mq_mode
!= ETH_MQ_RX_NONE
&&
279 rxmode
->mq_mode
!= ETH_MQ_RX_RSS
) {
280 octeontx_log_err("unsupported rx qmode %d", rxmode
->mq_mode
);
/* Lockfree tx cannot be turned off on this HW; force the flag on. */
284 if (!(txmode
->offloads
& DEV_TX_OFFLOAD_MT_LOCKFREE
)) {
285 PMD_INIT_LOG(NOTICE
, "cant disable lockfree tx");
286 txmode
->offloads
|= DEV_TX_OFFLOAD_MT_LOCKFREE
;
289 if (conf
->link_speeds
& ETH_LINK_SPEED_FIXED
) {
290 octeontx_log_err("setting link speed/duplex not supported");
294 if (conf
->dcb_capability_en
) {
295 octeontx_log_err("DCB enable not supported");
299 if (conf
->fdir_conf
.mode
!= RTE_FDIR_MODE_NONE
) {
300 octeontx_log_err("flow director not supported");
304 nic
->num_tx_queues
= dev
->data
->nb_tx_queues
;
/* One PKO channel per port; base DQ = port_id * PKO_VF_NUM_DQ. */
306 ret
= octeontx_pko_channel_open(nic
->port_id
* PKO_VF_NUM_DQ
,
310 octeontx_log_err("failed to open channel %d no-of-txq %d",
311 nic
->base_ochan
, nic
->num_tx_queues
);
315 nic
->pki
.classifier_enable
= false;
316 nic
->pki
.hash_enable
= true;
317 nic
->pki
.initialized
= false;
/* eth_dev_ops.dev_close: close the eventdev and the PKO channel, free
 * every txq allocation, and clear the burst-function pointers. Loop-body
 * free and some lines are elided in this extract.
 */
323 octeontx_dev_close(struct rte_eth_dev
*dev
)
325 struct octeontx_txq
*txq
= NULL
;
326 struct octeontx_nic
*nic
= octeontx_pmd_priv(dev
);
330 PMD_INIT_FUNC_TRACE();
332 rte_event_dev_close(nic
->evdev
);
334 ret
= octeontx_pko_channel_close(nic
->base_ochan
);
336 octeontx_log_err("failed to close channel %d VF%d %d %d",
337 nic
->base_ochan
, nic
->port_id
, nic
->num_tx_queues
,
340 /* Free txq resources for this port */
341 for (i
= 0; i
< nic
->num_tx_queues
; i
++) {
342 txq
= dev
->data
->tx_queues
[i
];
/* Disarm the datapath once resources are gone. */
349 dev
->tx_pkt_burst
= NULL
;
350 dev
->rx_pkt_burst
= NULL
;
/* eth_dev_ops.dev_start: bring up tx (PKO channel), rx (PKI port), the
 * BGX port and the eventdev, installing the burst functions. On failure
 * unwinds via the pki_port_stop_error / channel_stop_error labels below.
 * The error-check `if` lines are elided in this extract.
 */
354 octeontx_dev_start(struct rte_eth_dev
*dev
)
356 struct octeontx_nic
*nic
= octeontx_pmd_priv(dev
);
361 PMD_INIT_FUNC_TRACE();
365 dev
->tx_pkt_burst
= octeontx_xmit_pkts
;
366 ret
= octeontx_pko_channel_start(nic
->base_ochan
);
368 octeontx_log_err("fail to conf VF%d no. txq %d chan %d ret %d",
369 nic
->port_id
, nic
->num_tx_queues
, nic
->base_ochan
,
377 dev
->rx_pkt_burst
= octeontx_recv_pkts
;
378 ret
= octeontx_pki_port_start(nic
->port_id
);
380 octeontx_log_err("fail to start Rx on port %d", nic
->port_id
);
381 goto channel_stop_error
;
387 ret
= octeontx_port_start(nic
);
389 octeontx_log_err("failed start port %d", ret
);
390 goto pki_port_stop_error
;
393 PMD_TX_LOG(DEBUG
, "pko: start channel %d no.of txq %d port %d",
394 nic
->base_ochan
, nic
->num_tx_queues
, nic
->port_id
);
396 ret
= rte_event_dev_start(nic
->evdev
);
398 octeontx_log_err("failed to start evdev: ret (%d)", ret
);
399 goto pki_port_stop_error
;
/* Unwind path: stop PKI first, then the PKO channel. */
406 octeontx_pki_port_stop(nic
->port_id
);
408 octeontx_pko_channel_stop(nic
->base_ochan
);
/* eth_dev_ops.dev_stop: stop the eventdev, BGX port, PKI port and PKO
 * channel in that order; each failure is logged but teardown continues.
 */
414 octeontx_dev_stop(struct rte_eth_dev
*dev
)
416 struct octeontx_nic
*nic
= octeontx_pmd_priv(dev
);
419 PMD_INIT_FUNC_TRACE();
421 rte_event_dev_stop(nic
->evdev
);
423 ret
= octeontx_port_stop(nic
);
425 octeontx_log_err("failed to req stop port %d res=%d",
430 ret
= octeontx_pki_port_stop(nic
->port_id
);
432 octeontx_log_err("failed to stop pki port %d res=%d",
437 ret
= octeontx_pko_channel_stop(nic
->base_ochan
);
439 octeontx_log_err("failed to stop channel %d VF%d %d %d",
440 nic
->base_ochan
, nic
->port_id
, nic
->num_tx_queues
,
/* eth_dev_ops.promiscuous_enable: delegate to the BGX helper with en=1. */
447 octeontx_dev_promisc_enable(struct rte_eth_dev
*dev
)
449 struct octeontx_nic
*nic
= octeontx_pmd_priv(dev
);
451 PMD_INIT_FUNC_TRACE();
452 octeontx_port_promisc_set(nic
, 1);
/* eth_dev_ops.promiscuous_disable: delegate to the BGX helper with en=0. */
456 octeontx_dev_promisc_disable(struct rte_eth_dev
*dev
)
458 struct octeontx_nic
*nic
= octeontx_pmd_priv(dev
);
460 PMD_INIT_FUNC_TRACE();
461 octeontx_port_promisc_set(nic
, 0);
/* Query BGX for the link state of @nic's port and cache it in
 * nic->link_up. Error branch body is elided in this extract.
 */
465 octeontx_port_link_status(struct octeontx_nic
*nic
)
469 PMD_INIT_FUNC_TRACE();
470 res
= octeontx_bgx_port_link_status(nic
->port_id
);
472 octeontx_log_err("failed to get port %d link status",
477 nic
->link_up
= (uint8_t)res
;
478 octeontx_log_dbg("port %d link status %d", nic
->port_id
, nic
->link_up
);
484 * Return 0 means link status changed, -1 means not changed
/* eth_dev_ops.link_update: refresh link state from BGX, then map the
 * cached nic->speed enum to an ETH_SPEED_NUM_* value. `break`
 * statements between cases are elided in this extract — the upstream
 * code has them; RXAUI/10G_R and RESERVE1/RESERVE2 are deliberate
 * fall-through pairs.
 */
487 octeontx_dev_link_update(struct rte_eth_dev
*dev
,
488 int wait_to_complete __rte_unused
)
490 struct octeontx_nic
*nic
= octeontx_pmd_priv(dev
);
491 struct rte_eth_link link
;
494 PMD_INIT_FUNC_TRACE();
496 res
= octeontx_port_link_status(nic
);
498 octeontx_log_err("failed to request link status %d", res
);
502 link
.link_status
= nic
->link_up
;
504 switch (nic
->speed
) {
505 case OCTEONTX_LINK_SPEED_SGMII
:
506 link
.link_speed
= ETH_SPEED_NUM_1G
;
509 case OCTEONTX_LINK_SPEED_XAUI
:
510 link
.link_speed
= ETH_SPEED_NUM_10G
;
513 case OCTEONTX_LINK_SPEED_RXAUI
:
514 case OCTEONTX_LINK_SPEED_10G_R
:
515 link
.link_speed
= ETH_SPEED_NUM_10G
;
517 case OCTEONTX_LINK_SPEED_QSGMII
:
518 link
.link_speed
= ETH_SPEED_NUM_5G
;
520 case OCTEONTX_LINK_SPEED_40G_R
:
521 link
.link_speed
= ETH_SPEED_NUM_40G
;
/* Reserved/unknown modes: report no speed and log the anomaly. */
524 case OCTEONTX_LINK_SPEED_RESERVE1
:
525 case OCTEONTX_LINK_SPEED_RESERVE2
:
527 link
.link_speed
= ETH_SPEED_NUM_NONE
;
528 octeontx_log_err("incorrect link speed %d", nic
->speed
);
532 link
.link_duplex
= ETH_LINK_FULL_DUPLEX
;
533 link
.link_autoneg
= ETH_LINK_AUTONEG
;
535 return rte_eth_linkstatus_set(dev
, &link
);
/* eth_dev_ops.stats_get: forward to the BGX stats helper. */
539 octeontx_dev_stats_get(struct rte_eth_dev
*dev
, struct rte_eth_stats
*stats
)
541 struct octeontx_nic
*nic
= octeontx_pmd_priv(dev
);
543 PMD_INIT_FUNC_TRACE();
544 return octeontx_port_stats(nic
, stats
);
/* eth_dev_ops.stats_reset: forward to the BGX clear helper. */
548 octeontx_dev_stats_reset(struct rte_eth_dev
*dev
)
550 struct octeontx_nic
*nic
= octeontx_pmd_priv(dev
);
552 PMD_INIT_FUNC_TRACE();
553 octeontx_port_stats_clr(nic
);
/* eth_dev_ops.mac_addr_set: program @addr as the port's primary MAC via
 * the BGX mailbox. Error branch body is elided in this extract.
 */
557 octeontx_dev_default_mac_addr_set(struct rte_eth_dev
*dev
,
558 struct ether_addr
*addr
)
560 struct octeontx_nic
*nic
= octeontx_pmd_priv(dev
);
563 ret
= octeontx_bgx_port_mac_set(nic
->port_id
, addr
->addr_bytes
);
565 octeontx_log_err("failed to set MAC address on port %d",
/* eth_dev_ops.dev_infos_get: advertise fixed capabilities — speeds,
 * one MAC address, one rx queue, PKO_MAX_NUM_DQ tx queues, and the
 * compile-time rx/tx offload sets.
 */
572 octeontx_dev_info(struct rte_eth_dev
*dev
,
573 struct rte_eth_dev_info
*dev_info
)
577 /* Autonegotiation may be disabled */
578 dev_info
->speed_capa
= ETH_LINK_SPEED_FIXED
;
579 dev_info
->speed_capa
|= ETH_LINK_SPEED_10M
| ETH_LINK_SPEED_100M
|
580 ETH_LINK_SPEED_1G
| ETH_LINK_SPEED_10G
|
583 dev_info
->max_mac_addrs
= 1;
584 dev_info
->max_rx_pktlen
= PKI_MAX_PKTLEN
;
585 dev_info
->max_rx_queues
= 1;
586 dev_info
->max_tx_queues
= PKO_MAX_NUM_DQ
;
587 dev_info
->min_rx_bufsize
= 0;
589 dev_info
->default_rxconf
= (struct rte_eth_rxconf
) {
592 .offloads
= OCTEONTX_RX_OFFLOADS
,
595 dev_info
->default_txconf
= (struct rte_eth_txconf
) {
597 .offloads
= OCTEONTX_TX_OFFLOADS
,
600 dev_info
->rx_offload_capa
= OCTEONTX_RX_OFFLOADS
;
601 dev_info
->tx_offload_capa
= OCTEONTX_TX_OFFLOADS
;
/* Callback for octeontx_pko_channel_query_dqs(): copy the three DQ
 * doorbell/status pointers from @dq into the caller-supplied @out.
 */
605 octeontx_dq_info_getter(octeontx_dq_t
*dq
, void *out
)
607 ((octeontx_dq_t
*)out
)->lmtline_va
= dq
->lmtline_va
;
608 ((octeontx_dq_t
*)out
)->ioreg_va
= dq
->ioreg_va
;
609 ((octeontx_dq_t
*)out
)->fc_status_va
= dq
->fc_status_va
;
/* Start one tx queue: no-op if already started, otherwise query the DQ
 * info for the queue and mark it STARTED. On failure (error-handling
 * label body below) the port/channel are torn down and the queue is
 * marked STOPPED again.
 */
613 octeontx_vf_start_tx_queue(struct rte_eth_dev
*dev
, struct octeontx_nic
*nic
,
616 struct octeontx_txq
*txq
;
619 PMD_INIT_FUNC_TRACE();
/* Idempotent: starting an already-started queue is a no-op. */
621 if (dev
->data
->tx_queue_state
[qidx
] == RTE_ETH_QUEUE_STATE_STARTED
)
624 txq
= dev
->data
->tx_queues
[qidx
];
626 res
= octeontx_pko_channel_query_dqs(nic
->base_ochan
,
628 sizeof(octeontx_dq_t
),
630 octeontx_dq_info_getter
);
636 dev
->data
->tx_queue_state
[qidx
] = RTE_ETH_QUEUE_STATE_STARTED
;
/* Error path: undo port/channel setup and restore STOPPED state. */
640 (void)octeontx_port_stop(nic
);
641 octeontx_pko_channel_stop(nic
->base_ochan
);
642 octeontx_pko_channel_close(nic
->base_ochan
);
643 dev
->data
->tx_queue_state
[qidx
] = RTE_ETH_QUEUE_STATE_STOPPED
;
/* eth_dev_ops.tx_queue_start: fold qidx into the per-VF DQ range and
 * delegate to the helper above.
 */
648 octeontx_dev_tx_queue_start(struct rte_eth_dev
*dev
, uint16_t qidx
)
650 struct octeontx_nic
*nic
= octeontx_pmd_priv(dev
);
652 PMD_INIT_FUNC_TRACE();
653 qidx
= qidx
% PKO_VF_NUM_DQ
;
654 return octeontx_vf_start_tx_queue(dev
, nic
, qidx
);
/* Stop one tx queue: no-op if already stopped, otherwise mark it
 * STOPPED. (Any intervening teardown lines are elided in this extract.)
 */
658 octeontx_vf_stop_tx_queue(struct rte_eth_dev
*dev
, struct octeontx_nic
*nic
,
664 PMD_INIT_FUNC_TRACE();
666 if (dev
->data
->tx_queue_state
[qidx
] == RTE_ETH_QUEUE_STATE_STOPPED
)
669 dev
->data
->tx_queue_state
[qidx
] = RTE_ETH_QUEUE_STATE_STOPPED
;
/* eth_dev_ops.tx_queue_stop: fold qidx into the per-VF DQ range and
 * delegate to the helper above.
 */
674 octeontx_dev_tx_queue_stop(struct rte_eth_dev
*dev
, uint16_t qidx
)
676 struct octeontx_nic
*nic
= octeontx_pmd_priv(dev
);
678 PMD_INIT_FUNC_TRACE();
679 qidx
= qidx
% PKO_VF_NUM_DQ
;
681 return octeontx_vf_stop_tx_queue(dev
, nic
, qidx
);
/* eth_dev_ops.tx_queue_release: stop the queue before freeing it
 * (the rte_free of @txq is elided in this extract).
 */
685 octeontx_dev_tx_queue_release(void *tx_queue
)
687 struct octeontx_txq
*txq
= tx_queue
;
690 PMD_INIT_FUNC_TRACE();
693 res
= octeontx_dev_tx_queue_stop(txq
->eth_dev
, txq
->queue_id
);
695 octeontx_log_err("failed stop tx_queue(%d)\n",
/* eth_dev_ops.tx_queue_setup: allocate a txq on the nic's NUMA node,
 * map it to DQ (port_id * PKO_VF_NUM_DQ + qidx), and fetch its DQ
 * doorbell pointers. nb_desc/socket_id are advisory only (hardware
 * fixes the ring). Error-return lines are elided in this extract.
 */
703 octeontx_dev_tx_queue_setup(struct rte_eth_dev
*dev
, uint16_t qidx
,
704 uint16_t nb_desc
, unsigned int socket_id
,
705 const struct rte_eth_txconf
*tx_conf __rte_unused
)
707 struct octeontx_nic
*nic
= octeontx_pmd_priv(dev
);
708 struct octeontx_txq
*txq
= NULL
;
712 RTE_SET_USED(nb_desc
);
713 RTE_SET_USED(socket_id
);
715 dq_num
= (nic
->port_id
* PKO_VF_NUM_DQ
) + qidx
;
717 /* Socket id check */
718 if (socket_id
!= (unsigned int)SOCKET_ID_ANY
&&
719 socket_id
!= (unsigned int)nic
->node
)
720 PMD_TX_LOG(INFO
, "socket_id expected %d, configured %d",
721 socket_id
, nic
->node
);
723 /* Free memory prior to re-allocation if needed. */
724 if (dev
->data
->tx_queues
[qidx
] != NULL
) {
725 PMD_TX_LOG(DEBUG
, "freeing memory prior to re-allocation %d",
727 octeontx_dev_tx_queue_release(dev
->data
->tx_queues
[qidx
]);
728 dev
->data
->tx_queues
[qidx
] = NULL
;
731 /* Allocating tx queue data structure */
732 txq
= rte_zmalloc_socket("ethdev TX queue", sizeof(struct octeontx_txq
),
733 RTE_CACHE_LINE_SIZE
, nic
->node
);
735 octeontx_log_err("failed to allocate txq=%d", qidx
);
741 txq
->queue_id
= dq_num
;
742 dev
->data
->tx_queues
[qidx
] = txq
;
743 dev
->data
->tx_queue_state
[qidx
] = RTE_ETH_QUEUE_STATE_STOPPED
;
745 res
= octeontx_pko_channel_query_dqs(nic
->base_ochan
,
747 sizeof(octeontx_dq_t
),
749 octeontx_dq_info_getter
);
755 PMD_TX_LOG(DEBUG
, "[%d]:[%d] txq=%p nb_desc=%d lmtline=%p ioreg_va=%p fc_status_va=%p",
756 qidx
, txq
->queue_id
, txq
, nb_desc
, txq
->dq
.lmtline_va
,
758 txq
->dq
.fc_status_va
);
/* eth_dev_ops.rx_queue_setup: validate the mempool (must be
 * octeontx_fpavf) and configuration, then on first queue setup program
 * the PKI block — pktbuf layout, RSS hash fields and a single QOS entry
 * pointing at this pool's GAURA and the port's event queues — before
 * populating and registering the rxq. Error-return lines and a few
 * branches are elided in this extract.
 */
770 octeontx_dev_rx_queue_setup(struct rte_eth_dev
*dev
, uint16_t qidx
,
771 uint16_t nb_desc
, unsigned int socket_id
,
772 const struct rte_eth_rxconf
*rx_conf
,
773 struct rte_mempool
*mb_pool
)
775 struct octeontx_nic
*nic
= octeontx_pmd_priv(dev
);
776 struct rte_mempool_ops
*mp_ops
= NULL
;
777 struct octeontx_rxq
*rxq
= NULL
;
778 pki_pktbuf_cfg_t pktbuf_conf
;
779 pki_hash_cfg_t pki_hash
;
780 pki_qos_cfg_t pki_qos
;
/* Each port owns a contiguous band of event queues/ports; this queue
 * maps to slot qidx within that band.
 */
784 unsigned int ev_queues
= (nic
->ev_queues
* nic
->port_id
) + qidx
;
785 unsigned int ev_ports
= (nic
->ev_ports
* nic
->port_id
) + qidx
;
787 RTE_SET_USED(nb_desc
);
789 memset(&pktbuf_conf
, 0, sizeof(pktbuf_conf
));
790 memset(&pki_hash
, 0, sizeof(pki_hash
));
791 memset(&pki_qos
, 0, sizeof(pki_qos
));
/* The HW can only pull buffers from the octeontx FPA pool manager. */
793 mp_ops
= rte_mempool_get_ops(mb_pool
->ops_index
);
794 if (strcmp(mp_ops
->name
, "octeontx_fpavf")) {
795 octeontx_log_err("failed to find octeontx_fpavf mempool");
799 /* Handle forbidden configurations */
800 if (nic
->pki
.classifier_enable
) {
801 octeontx_log_err("cannot setup queue %d. "
802 "Classifier option unsupported", qidx
);
808 /* Rx deferred start is not supported */
809 if (rx_conf
->rx_deferred_start
) {
810 octeontx_log_err("rx deferred start not supported");
814 /* Verify queue index */
815 if (qidx
>= dev
->data
->nb_rx_queues
) {
816 octeontx_log_err("QID %d not supporteded (0 - %d available)\n",
817 qidx
, (dev
->data
->nb_rx_queues
- 1));
821 /* Socket id check */
822 if (socket_id
!= (unsigned int)SOCKET_ID_ANY
&&
823 socket_id
!= (unsigned int)nic
->node
)
824 PMD_RX_LOG(INFO
, "socket_id expected %d, configured %d",
825 socket_id
, nic
->node
);
827 /* Allocating rx queue data structure */
828 rxq
= rte_zmalloc_socket("ethdev RX queue", sizeof(struct octeontx_rxq
),
829 RTE_CACHE_LINE_SIZE
, nic
->node
);
831 octeontx_log_err("failed to allocate rxq=%d", qidx
);
/* One-time PKI programming per port; guarded by nic->pki.initialized. */
835 if (!nic
->pki
.initialized
) {
836 pktbuf_conf
.port_type
= 0;
837 pki_hash
.port_type
= 0;
838 pki_qos
.port_type
= 0;
840 pktbuf_conf
.mmask
.f_wqe_skip
= 1;
841 pktbuf_conf
.mmask
.f_first_skip
= 1;
842 pktbuf_conf
.mmask
.f_later_skip
= 1;
843 pktbuf_conf
.mmask
.f_mbuff_size
= 1;
844 pktbuf_conf
.mmask
.f_cache_mode
= 1;
846 pktbuf_conf
.wqe_skip
= OCTTX_PACKET_WQE_SKIP
;
847 pktbuf_conf
.first_skip
= OCTTX_PACKET_FIRST_SKIP(mb_pool
);
848 pktbuf_conf
.later_skip
= OCTTX_PACKET_LATER_SKIP
;
/* Usable data area = mbuf element minus headroom, private area
 * and the rte_mbuf header itself.
 */
849 pktbuf_conf
.mbuff_size
= (mb_pool
->elt_size
-
850 RTE_PKTMBUF_HEADROOM
-
851 rte_pktmbuf_priv_size(mb_pool
) -
852 sizeof(struct rte_mbuf
));
854 pktbuf_conf
.cache_mode
= PKI_OPC_MODE_STF2_STT
;
856 ret
= octeontx_pki_port_pktbuf_config(port
, &pktbuf_conf
);
858 octeontx_log_err("fail to configure pktbuf for port %d",
863 PMD_RX_LOG(DEBUG
, "Port %d Rx pktbuf configured:\n"
864 "\tmbuf_size:\t0x%0x\n"
865 "\twqe_skip:\t0x%0x\n"
866 "\tfirst_skip:\t0x%0x\n"
867 "\tlater_skip:\t0x%0x\n"
868 "\tcache_mode:\t%s\n",
870 pktbuf_conf
.mbuff_size
,
871 pktbuf_conf
.wqe_skip
,
872 pktbuf_conf
.first_skip
,
873 pktbuf_conf
.later_skip
,
874 (pktbuf_conf
.cache_mode
==
877 (pktbuf_conf
.cache_mode
==
880 (pktbuf_conf
.cache_mode
==
881 PKI_OPC_MODE_STF1_STT
) ?
882 "STF1_STT" : "STF2_STT");
884 if (nic
->pki
.hash_enable
) {
885 pki_hash
.tag_dlc
= 1;
886 pki_hash
.tag_slc
= 1;
887 pki_hash
.tag_dlf
= 1;
888 pki_hash
.tag_slf
= 1;
889 pki_hash
.tag_prt
= 1;
890 octeontx_pki_port_hash_config(port
, &pki_hash
);
893 pool
= (uintptr_t)mb_pool
->pool_id
;
895 /* Get the gaura Id */
896 gaura
= octeontx_fpa_bufpool_gaura(pool
);
898 pki_qos
.qpg_qos
= PKI_QPG_QOS_NONE
;
899 pki_qos
.num_entry
= 1;
900 pki_qos
.drop_policy
= 0;
901 pki_qos
.tag_type
= 0L;
902 pki_qos
.qos_entry
[0].port_add
= 0;
903 pki_qos
.qos_entry
[0].gaura
= gaura
;
904 pki_qos
.qos_entry
[0].ggrp_ok
= ev_queues
;
905 pki_qos
.qos_entry
[0].ggrp_bad
= ev_queues
;
906 pki_qos
.qos_entry
[0].grptag_bad
= 0;
907 pki_qos
.qos_entry
[0].grptag_ok
= 0;
909 ret
= octeontx_pki_port_create_qos(port
, &pki_qos
);
911 octeontx_log_err("failed to create QOS port=%d, q=%d",
916 nic
->pki
.initialized
= true;
919 rxq
->port_id
= nic
->port_id
;
921 rxq
->queue_id
= qidx
;
922 rxq
->evdev
= nic
->evdev
;
923 rxq
->ev_queues
= ev_queues
;
924 rxq
->ev_ports
= ev_ports
;
926 dev
->data
->rx_queues
[qidx
] = rxq
;
927 dev
->data
->rx_queue_state
[qidx
] = RTE_ETH_QUEUE_STATE_STOPPED
;
/* eth_dev_ops.rx_queue_release (body elided in this extract —
 * presumably rte_free(rxq); verify against upstream).
 */
932 octeontx_dev_rx_queue_release(void *rxq
)
/* eth_dev_ops.dev_supported_ptypes_get: return the static ptype table
 * only when this PMD's rx burst function is installed (NULL branch
 * elided in this extract).
 */
937 static const uint32_t *
938 octeontx_dev_supported_ptypes_get(struct rte_eth_dev
*dev
)
940 static const uint32_t ptypes
[] = {
942 RTE_PTYPE_L3_IPV4_EXT
,
944 RTE_PTYPE_L3_IPV6_EXT
,
951 if (dev
->rx_pkt_burst
== octeontx_recv_pkts
)
/* eth_dev_ops.pool_ops_supported: only the octeontx FPA pool manager
 * is usable with this hardware.
 */
958 octeontx_pool_ops(struct rte_eth_dev
*dev
, const char *pool
)
962 if (!strcmp(pool
, "octeontx_fpavf"))
968 /* Initialize and register driver with DPDK Application */
/* ethdev operations table wired into every eth_dev created by this PMD. */
969 static const struct eth_dev_ops octeontx_dev_ops
= {
970 .dev_configure
= octeontx_dev_configure
,
971 .dev_infos_get
= octeontx_dev_info
,
972 .dev_close
= octeontx_dev_close
,
973 .dev_start
= octeontx_dev_start
,
974 .dev_stop
= octeontx_dev_stop
,
975 .promiscuous_enable
= octeontx_dev_promisc_enable
,
976 .promiscuous_disable
= octeontx_dev_promisc_disable
,
977 .link_update
= octeontx_dev_link_update
,
978 .stats_get
= octeontx_dev_stats_get
,
979 .stats_reset
= octeontx_dev_stats_reset
,
980 .mac_addr_set
= octeontx_dev_default_mac_addr_set
,
981 .tx_queue_start
= octeontx_dev_tx_queue_start
,
982 .tx_queue_stop
= octeontx_dev_tx_queue_stop
,
983 .tx_queue_setup
= octeontx_dev_tx_queue_setup
,
984 .tx_queue_release
= octeontx_dev_tx_queue_release
,
985 .rx_queue_setup
= octeontx_dev_rx_queue_setup
,
986 .rx_queue_release
= octeontx_dev_rx_queue_release
,
987 .dev_supported_ptypes_get
= octeontx_dev_supported_ptypes_get
,
988 .pool_ops_supported
= octeontx_pool_ops
,
991 /* Create Ethdev interface per BGX LMAC ports */
/* Allocate and initialize one ethdev ("<vdev>_<port>") for a BGX LMAC.
 * Secondary processes only attach to the existing device. On success
 * returns the new port id; the error labels at the bottom close the BGX
 * port and release the ethdev. Several lines (error checks, nic->evdev
 * assignment) are elided in this extract.
 */
993 octeontx_create(struct rte_vdev_device
*dev
, int port
, uint8_t evdev
,
997 char octtx_name
[OCTEONTX_MAX_NAME_LEN
];
998 struct octeontx_nic
*nic
= NULL
;
999 struct rte_eth_dev
*eth_dev
= NULL
;
1000 struct rte_eth_dev_data
*data
;
1001 const char *name
= rte_vdev_device_name(dev
);
1003 PMD_INIT_FUNC_TRACE();
1005 sprintf(octtx_name
, "%s_%d", name
, port
);
/* Secondary process: attach and install burst functions only. */
1006 if (rte_eal_process_type() != RTE_PROC_PRIMARY
) {
1007 eth_dev
= rte_eth_dev_attach_secondary(octtx_name
);
1008 if (eth_dev
== NULL
)
1011 eth_dev
->dev_ops
= &octeontx_dev_ops
;
1012 eth_dev
->device
= &dev
->device
;
1013 eth_dev
->tx_pkt_burst
= octeontx_xmit_pkts
;
1014 eth_dev
->rx_pkt_burst
= octeontx_recv_pkts
;
1015 rte_eth_dev_probing_finish(eth_dev
);
1019 /* Reserve an ethdev entry */
1020 eth_dev
= rte_eth_dev_allocate(octtx_name
);
1021 if (eth_dev
== NULL
) {
1022 octeontx_log_err("failed to allocate rte_eth_dev");
1026 data
= eth_dev
->data
;
1028 nic
= rte_zmalloc_socket(octtx_name
, sizeof(*nic
), 0, socket_id
);
1030 octeontx_log_err("failed to allocate nic structure");
1034 data
->dev_private
= nic
;
1036 nic
->port_id
= port
;
1039 res
= octeontx_port_open(nic
);
1043 /* Rx side port configuration */
1044 res
= octeontx_pki_port_open(port
);
1046 octeontx_log_err("failed to open PKI port %d", port
);
1051 eth_dev
->device
= &dev
->device
;
1052 eth_dev
->intr_handle
= NULL
;
1053 eth_dev
->data
->kdrv
= RTE_KDRV_NONE
;
1054 eth_dev
->data
->numa_node
= dev
->device
.numa_node
;
1056 data
->port_id
= eth_dev
->data
->port_id
;
1061 data
->dev_link
.link_status
= ETH_LINK_DOWN
;
1062 data
->dev_started
= 0;
1063 data
->promiscuous
= 0;
1064 data
->all_multicast
= 0;
1065 data
->scattered_rx
= 0;
1067 data
->mac_addrs
= rte_zmalloc_socket(octtx_name
, ETHER_ADDR_LEN
, 0,
1069 if (data
->mac_addrs
== NULL
) {
1070 octeontx_log_err("failed to allocate memory for mac_addrs");
1075 eth_dev
->dev_ops
= &octeontx_dev_ops
;
1077 /* Finally save ethdev pointer to the NIC structure */
/* Sanity: ethdev's port id must match the BGX port id this nic opened. */
1080 if (nic
->port_id
!= data
->port_id
) {
1081 octeontx_log_err("eth_dev->port_id (%d) is diff to orig (%d)",
1082 data
->port_id
, nic
->port_id
);
1087 /* Update port_id mac to eth_dev */
1088 memcpy(data
->mac_addrs
, nic
->mac_addr
, ETHER_ADDR_LEN
);
1090 PMD_INIT_LOG(DEBUG
, "ethdev info: ");
1091 PMD_INIT_LOG(DEBUG
, "port %d, port_ena %d ochan %d num_ochan %d tx_q %d",
1092 nic
->port_id
, nic
->port_ena
,
1093 nic
->base_ochan
, nic
->num_ochans
,
1094 nic
->num_tx_queues
);
1095 PMD_INIT_LOG(DEBUG
, "speed %d mtu %d", nic
->speed
, nic
->mtu
);
/* Record channel bits -> port id so the rx path can resolve ports. */
1097 rte_octeontx_pchan_map
[(nic
->base_ochan
>> 8) & 0x7]
1098 [(nic
->base_ochan
>> 4) & 0xF] = data
->port_id
;
1100 rte_eth_dev_probing_finish(eth_dev
);
1101 return data
->port_id
;
/* Error labels: undo in reverse order of acquisition. */
1105 octeontx_port_close(nic
);
1107 rte_eth_dev_release_port(eth_dev
);
1112 /* Un initialize octeontx device */
/* vdev remove hook: release every "eth_octeontx_<i>" ethdev; in the
 * primary process also stop/close the shared eventdev and free the PKO
 * flow-control resources.
 */
1114 octeontx_remove(struct rte_vdev_device
*dev
)
1116 char octtx_name
[OCTEONTX_MAX_NAME_LEN
];
1117 struct rte_eth_dev
*eth_dev
= NULL
;
1118 struct octeontx_nic
*nic
= NULL
;
1124 for (i
= 0; i
< OCTEONTX_VDEV_DEFAULT_MAX_NR_PORT
; i
++) {
1125 sprintf(octtx_name
, "eth_octeontx_%d", i
);
1127 /* reserve an ethdev entry */
1128 eth_dev
= rte_eth_dev_allocated(octtx_name
);
1129 if (eth_dev
== NULL
)
/* Secondary process: detach only; primary owns the eventdev. */
1132 if (rte_eal_process_type() != RTE_PROC_PRIMARY
) {
1133 rte_eth_dev_release_port(eth_dev
);
1137 nic
= octeontx_pmd_priv(eth_dev
);
1138 rte_event_dev_stop(nic
->evdev
);
1139 PMD_INIT_LOG(INFO
, "Closing octeontx device %s", octtx_name
);
/* NOTE(review): the port is released before rte_event_dev_close()
 * reads nic->evdev — nic lives in dev_private, so this ordering
 * looks use-after-release-prone; verify against upstream.
 */
1141 rte_eth_dev_release_port(eth_dev
);
1142 rte_event_dev_close(nic
->evdev
);
1145 if (rte_eal_process_type() != RTE_PROC_PRIMARY
)
1148 /* Free FC resource */
1149 octeontx_pko_fc_free();
1154 /* Initialize octeontx device */
/* vdev probe hook: parse devargs, locate and configure the
 * "event_octeontx" eventdev (queues, ports, 1:1 links), initialize PKO
 * flow control, create one ethdev per requested port, and select the
 * octeontx mempool ops as platform default. Many error-check lines are
 * elided in this extract; failures unwind through octeontx_remove()
 * and octeontx_pko_fc_free() at the bottom.
 */
1156 octeontx_probe(struct rte_vdev_device
*dev
)
1158 const char *dev_name
;
1159 static int probe_once
;
1160 uint8_t socket_id
, qlist
;
1161 int tx_vfcnt
, port_id
, evdev
, qnum
, pnum
, res
, i
;
1162 struct rte_event_dev_config dev_conf
;
1163 const char *eventdev_name
= "event_octeontx";
1164 struct rte_event_dev_info info
;
1165 struct rte_eth_dev
*eth_dev
;
1167 struct octeontx_vdev_init_params init_params
= {
1168 OCTEONTX_VDEV_DEFAULT_MAX_NR_PORT
1171 dev_name
= rte_vdev_device_name(dev
);
/* Secondary with no devargs: plain attach, no HW programming. */
1173 if (rte_eal_process_type() == RTE_PROC_SECONDARY
&&
1174 strlen(rte_vdev_device_args(dev
)) == 0) {
1175 eth_dev
= rte_eth_dev_attach_secondary(dev_name
);
1177 RTE_LOG(ERR
, PMD
, "Failed to probe %s\n", dev_name
);
1180 /* TODO: request info from primary to set up Rx and Tx */
1181 eth_dev
->dev_ops
= &octeontx_dev_ops
;
1182 eth_dev
->device
= &dev
->device
;
1183 rte_eth_dev_probing_finish(eth_dev
);
1187 res
= octeontx_parse_vdev_init_params(&init_params
, dev
);
1191 if (init_params
.nr_port
> OCTEONTX_VDEV_DEFAULT_MAX_NR_PORT
) {
1192 octeontx_log_err("nr_port (%d) > max (%d)", init_params
.nr_port
,
1193 OCTEONTX_VDEV_DEFAULT_MAX_NR_PORT
);
1197 PMD_INIT_LOG(DEBUG
, "initializing %s pmd", dev_name
);
1199 socket_id
= rte_socket_id();
/* Each port needs a PKO VF; bail if fewer VFs than requested ports. */
1201 tx_vfcnt
= octeontx_pko_vf_count();
1203 if (tx_vfcnt
< init_params
.nr_port
) {
1204 octeontx_log_err("not enough PKO (%d) for port number (%d)",
1205 tx_vfcnt
, init_params
.nr_port
);
1208 evdev
= rte_event_dev_get_dev_id(eventdev_name
);
1210 octeontx_log_err("eventdev %s not found", eventdev_name
);
1214 res
= rte_event_dev_info_get(evdev
, &info
);
1216 octeontx_log_err("failed to eventdev info %d", res
);
1220 PMD_INIT_LOG(DEBUG
, "max_queue %d max_port %d",
1221 info
.max_event_queues
, info
.max_event_ports
);
1223 if (octeontx_pko_init_fc(tx_vfcnt
))
1226 devconf_set_default_sane_values(&dev_conf
, &info
);
1227 res
= rte_event_dev_configure(evdev
, &dev_conf
);
1231 rte_event_dev_attr_get(evdev
, RTE_EVENT_DEV_ATTR_PORT_COUNT
,
1233 rte_event_dev_attr_get(evdev
, RTE_EVENT_DEV_ATTR_QUEUE_COUNT
,
1236 octeontx_log_err("too few event ports (%d) for event_q(%d)",
1242 /* Enable all queues available */
1243 for (i
= 0; i
< qnum
; i
++) {
1244 res
= rte_event_queue_setup(evdev
, i
, NULL
);
1246 octeontx_log_err("failed to setup event_q(%d): res %d",
1252 /* Enable all ports available */
1253 for (i
= 0; i
< pnum
; i
++) {
1254 res
= rte_event_port_setup(evdev
, i
, NULL
);
1257 octeontx_log_err("failed to setup ev port(%d) res=%d",
1264 * Do 1:1 links for ports & queues. All queues would be mapped to
1265 * one port. If there are more ports than queues, then some ports
1266 * won't be linked to any queue.
1268 for (i
= 0; i
< qnum
; i
++) {
1269 /* Link one queue to one event port */
1271 res
= rte_event_port_link(evdev
, i
, &qlist
, NULL
, 1);
1274 octeontx_log_err("failed to link port (%d): res=%d",
1280 /* Create ethdev interface */
1281 for (i
= 0; i
< init_params
.nr_port
; i
++) {
1282 port_id
= octeontx_create(dev
, i
, evdev
, socket_id
);
1284 octeontx_log_err("failed to create device %s",
1290 PMD_INIT_LOG(INFO
, "created ethdev %s for port %d", dev_name
,
1295 octeontx_log_err("interface %s not supported", dev_name
);
1296 octeontx_remove(dev
);
1300 rte_mbuf_set_platform_mempool_ops("octeontx_fpavf");
/* Error path: release the PKO flow-control state. */
1306 octeontx_pko_fc_free();
/* Register the vdev driver, its legacy alias and the accepted devargs. */
1310 static struct rte_vdev_driver octeontx_pmd_drv
= {
1311 .probe
= octeontx_probe
,
1312 .remove
= octeontx_remove
,
1315 RTE_PMD_REGISTER_VDEV(OCTEONTX_PMD
, octeontx_pmd_drv
);
1316 RTE_PMD_REGISTER_ALIAS(OCTEONTX_PMD
, eth_octeontx
);
1317 RTE_PMD_REGISTER_PARAM_STRING(OCTEONTX_PMD
, "nr_port=<int> ");