/* SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved.
 *
 */

#include <rte_ethdev_driver.h>
#include <rte_malloc.h>
#include <rte_memcpy.h>
#include <rte_string_fns.h>
#include <rte_cycles.h>
#include <rte_kvargs.h>

#include <rte_fslmc.h>
#include <rte_flow_driver.h>

#include "dpaa2_pmd_logs.h"
#include <fslmc_vfio.h>
#include <dpaa2_hw_pvt.h>
#include <dpaa2_hw_mempool.h>
#include <dpaa2_hw_dpio.h>
#include <mc/fsl_dpmng.h>
#include "dpaa2_ethdev.h"
#include <fsl_qbman_debug.h>

#define DRIVER_LOOPBACK_MODE "drv_loopback"

/* Supported Rx offloads */
static uint64_t dev_rx_offloads_sup =
		DEV_RX_OFFLOAD_VLAN_STRIP |
		DEV_RX_OFFLOAD_IPV4_CKSUM |
		DEV_RX_OFFLOAD_UDP_CKSUM |
		DEV_RX_OFFLOAD_TCP_CKSUM |
		DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
		DEV_RX_OFFLOAD_VLAN_FILTER |
		DEV_RX_OFFLOAD_JUMBO_FRAME;

/* Rx offloads which cannot be disabled */
static uint64_t dev_rx_offloads_nodis =
		DEV_RX_OFFLOAD_SCATTER;

/* Supported Tx offloads */
static uint64_t dev_tx_offloads_sup =
		DEV_TX_OFFLOAD_VLAN_INSERT |
		DEV_TX_OFFLOAD_IPV4_CKSUM |
		DEV_TX_OFFLOAD_UDP_CKSUM |
		DEV_TX_OFFLOAD_TCP_CKSUM |
		DEV_TX_OFFLOAD_SCTP_CKSUM |
		DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM;

/* Tx offloads which cannot be disabled */
static uint64_t dev_tx_offloads_nodis =
		DEV_TX_OFFLOAD_MULTI_SEGS |
		DEV_TX_OFFLOAD_MT_LOCKFREE |
		DEV_TX_OFFLOAD_MBUF_FAST_FREE;

/* enable timestamp in mbuf */
enum pmd_dpaa2_ts dpaa2_enable_ts;

struct rte_dpaa2_xstats_name_off {
	char name[RTE_ETH_XSTATS_NAME_SIZE];
	uint8_t page_id; /* dpni statistics page id */
	uint8_t stats_id; /* stats id in the given page */
};

static const struct rte_dpaa2_xstats_name_off dpaa2_xstats_strings[] = {
	{"ingress_multicast_frames", 0, 2},
	{"ingress_multicast_bytes", 0, 3},
	{"ingress_broadcast_frames", 0, 4},
	{"ingress_broadcast_bytes", 0, 5},
	{"egress_multicast_frames", 1, 2},
	{"egress_multicast_bytes", 1, 3},
	{"egress_broadcast_frames", 1, 4},
	{"egress_broadcast_bytes", 1, 5},
	{"ingress_filtered_frames", 2, 0},
	{"ingress_discarded_frames", 2, 1},
	{"ingress_nobuffer_discards", 2, 2},
	{"egress_discarded_frames", 2, 3},
	{"egress_confirmed_frames", 2, 4},
};

static const enum rte_filter_op dpaa2_supported_filter_ops[] = {
	RTE_ETH_FILTER_DELETE,
	RTE_ETH_FILTER_UPDATE,
};

static struct rte_dpaa2_driver rte_dpaa2_pmd;
static int dpaa2_dev_uninit(struct rte_eth_dev *eth_dev);
static int dpaa2_dev_link_update(struct rte_eth_dev *dev,
				 int wait_to_complete);
static int dpaa2_dev_set_link_up(struct rte_eth_dev *dev);
static int dpaa2_dev_set_link_down(struct rte_eth_dev *dev);
static int dpaa2_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu);

int dpaa2_logtype_pmd;

__rte_experimental void
rte_pmd_dpaa2_set_timestamp(enum pmd_dpaa2_ts enable)
{
	dpaa2_enable_ts = enable;
}
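
/*
 * Illustrative usage sketch, not part of the driver: an application enables
 * per-packet Rx timestamping through this experimental API before starting
 * the port. rte_pmd_dpaa2.h and the PMD_DPAA2_ENABLE_TS enumerator are
 * assumed to come from this PMD's public header; the rest is generic ethdev
 * usage.
 *
 *	#include <rte_pmd_dpaa2.h>
 *
 *	rte_pmd_dpaa2_set_timestamp(PMD_DPAA2_ENABLE_TS);
 *	ret = rte_eth_dev_start(port_id);
 */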

static int
dpaa2_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
{
	int ret;
	struct dpaa2_dev_priv *priv = dev->data->dev_private;
	struct fsl_mc_io *dpni = priv->hw;

	PMD_INIT_FUNC_TRACE();

	if (dpni == NULL) {
		DPAA2_PMD_ERR("dpni is NULL");
		return -1;
	}

	if (on)
		ret = dpni_add_vlan_id(dpni, CMD_PRI_LOW,
				       priv->token, vlan_id);
	else
		ret = dpni_remove_vlan_id(dpni, CMD_PRI_LOW,
					  priv->token, vlan_id);
	if (ret < 0)
		DPAA2_PMD_ERR("ret = %d Unable to add/rem vlan %d hwid =%d",
			      ret, vlan_id, priv->hw_id);

	return ret;
}
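
/*
 * Illustrative sketch (generic ethdev API, not driver code): applications
 * reach dpaa2_vlan_filter_set() through rte_eth_dev_vlan_filter() once
 * DEV_RX_OFFLOAD_VLAN_FILTER has been enabled in the port configuration.
 *
 *	ret = rte_eth_dev_vlan_filter(port_id, 100, 1);  (accept VLAN 100)
 *	ret = rte_eth_dev_vlan_filter(port_id, 100, 0);  (remove the filter)
 */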

static int
dpaa2_vlan_offload_set(struct rte_eth_dev *dev, int mask)
{
	struct dpaa2_dev_priv *priv = dev->data->dev_private;
	struct fsl_mc_io *dpni = priv->hw;
	int ret;

	PMD_INIT_FUNC_TRACE();

	if (mask & ETH_VLAN_FILTER_MASK) {
		/* VLAN Filter not available */
		if (!priv->max_vlan_filters) {
			DPAA2_PMD_INFO("VLAN filter not available");
			goto next_mask;
		}

		if (dev->data->dev_conf.rxmode.offloads &
			DEV_RX_OFFLOAD_VLAN_FILTER)
			ret = dpni_enable_vlan_filter(dpni, CMD_PRI_LOW,
						      priv->token, true);
		else
			ret = dpni_enable_vlan_filter(dpni, CMD_PRI_LOW,
						      priv->token, false);
		if (ret < 0)
			DPAA2_PMD_INFO("Unable to set vlan filter = %d", ret);
	}
next_mask:
	if (mask & ETH_VLAN_EXTEND_MASK) {
		if (dev->data->dev_conf.rxmode.offloads &
			DEV_RX_OFFLOAD_VLAN_EXTEND)
			DPAA2_PMD_INFO("VLAN extend offload not supported");
	}

	return 0;
}

static int
dpaa2_vlan_tpid_set(struct rte_eth_dev *dev,
		    enum rte_vlan_type vlan_type __rte_unused,
		    uint16_t tpid)
{
	struct dpaa2_dev_priv *priv = dev->data->dev_private;
	struct fsl_mc_io *dpni = priv->hw;
	int ret = -ENOTSUP;

	PMD_INIT_FUNC_TRACE();

	/* nothing to be done for standard vlan tpids */
	if (tpid == 0x8100 || tpid == 0x88A8)
		return 0;

	ret = dpni_add_custom_tpid(dpni, CMD_PRI_LOW,
				   priv->token, tpid);
	if (ret < 0)
		DPAA2_PMD_INFO("Unable to set vlan tpid = %d", ret);
	/* if already configured tpids, remove them first */
	if (ret == -EBUSY) {
		struct dpni_custom_tpid_cfg tpid_list = {0};

		ret = dpni_get_custom_tpid(dpni, CMD_PRI_LOW,
					   priv->token, &tpid_list);
		if (ret < 0)
			goto fail;
		ret = dpni_remove_custom_tpid(dpni, CMD_PRI_LOW,
					      priv->token, tpid_list.tpid1);
		if (ret < 0)
			goto fail;
		ret = dpni_add_custom_tpid(dpni, CMD_PRI_LOW,
					   priv->token, tpid);
	}
fail:
	return ret;
}

static int
dpaa2_fw_version_get(struct rte_eth_dev *dev,
		     char *fw_version, size_t fw_size)
{
	int ret;
	struct dpaa2_dev_priv *priv = dev->data->dev_private;
	struct fsl_mc_io *dpni = priv->hw;
	struct mc_soc_version mc_plat_info = {0};
	struct mc_version mc_ver_info = {0};

	PMD_INIT_FUNC_TRACE();

	if (mc_get_soc_version(dpni, CMD_PRI_LOW, &mc_plat_info))
		DPAA2_PMD_WARN("\tmc_get_soc_version failed");

	if (mc_get_version(dpni, CMD_PRI_LOW, &mc_ver_info))
		DPAA2_PMD_WARN("\tmc_get_version failed");

	ret = snprintf(fw_version, fw_size,
		       "%x-%d.%d.%d",
		       mc_plat_info.svr,
		       mc_ver_info.major,
		       mc_ver_info.minor,
		       mc_ver_info.revision);

	ret += 1; /* add the size of '\0' */
	if (fw_size < (uint32_t)ret)
		return ret;
	else
		return 0;
}
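
/*
 * Illustrative sketch (generic ethdev API): callers can size the buffer by
 * probing first, since the callback above returns the required length when
 * fw_size is too small.
 *
 *	char fw[64];
 *	if (rte_eth_dev_fw_version_get(port_id, fw, sizeof(fw)) == 0)
 *		printf("firmware: %s\n", fw);
 */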

static void
dpaa2_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
{
	struct dpaa2_dev_priv *priv = dev->data->dev_private;

	PMD_INIT_FUNC_TRACE();

	dev_info->if_index = priv->hw_id;

	dev_info->max_mac_addrs = priv->max_mac_filters;
	dev_info->max_rx_pktlen = DPAA2_MAX_RX_PKT_LEN;
	dev_info->min_rx_bufsize = DPAA2_MIN_RX_BUF_SIZE;
	dev_info->max_rx_queues = (uint16_t)priv->nb_rx_queues;
	dev_info->max_tx_queues = (uint16_t)priv->nb_tx_queues;
	dev_info->rx_offload_capa = dev_rx_offloads_sup |
					dev_rx_offloads_nodis;
	dev_info->tx_offload_capa = dev_tx_offloads_sup |
					dev_tx_offloads_nodis;
	dev_info->speed_capa = ETH_LINK_SPEED_1G |
			ETH_LINK_SPEED_2_5G |
			ETH_LINK_SPEED_10G;

	dev_info->max_hash_mac_addrs = 0;
	dev_info->max_vfs = 0;
	dev_info->max_vmdq_pools = ETH_16_POOLS;
	dev_info->flow_type_rss_offloads = DPAA2_RSS_OFFLOAD_ALL;
}

static int
dpaa2_alloc_rx_tx_queues(struct rte_eth_dev *dev)
{
	struct dpaa2_dev_priv *priv = dev->data->dev_private;
	uint16_t dist_idx;
	uint32_t vq_id;
	uint8_t num_rxqueue_per_tc;
	struct dpaa2_queue *mc_q, *mcq;
	uint32_t tot_queues;
	int i;
	struct dpaa2_queue *dpaa2_q;

	PMD_INIT_FUNC_TRACE();

	num_rxqueue_per_tc = (priv->nb_rx_queues / priv->num_rx_tc);
	tot_queues = priv->nb_rx_queues + priv->nb_tx_queues;
	mc_q = rte_malloc(NULL, sizeof(struct dpaa2_queue) * tot_queues,
			  RTE_CACHE_LINE_SIZE);
	if (!mc_q) {
		DPAA2_PMD_ERR("Memory allocation failed for rx/tx queues");
		return -1;
	}

	for (i = 0; i < priv->nb_rx_queues; i++) {
		mc_q->eth_data = dev->data;
		priv->rx_vq[i] = mc_q++;
		dpaa2_q = (struct dpaa2_queue *)priv->rx_vq[i];
		dpaa2_q->q_storage = rte_malloc("dq_storage",
					sizeof(struct queue_storage_info_t),
					RTE_CACHE_LINE_SIZE);
		if (!dpaa2_q->q_storage)
			goto fail;

		memset(dpaa2_q->q_storage, 0,
		       sizeof(struct queue_storage_info_t));
		if (dpaa2_alloc_dq_storage(dpaa2_q->q_storage))
			goto fail;
	}

	for (i = 0; i < priv->nb_tx_queues; i++) {
		mc_q->eth_data = dev->data;
		mc_q->flow_id = 0xffff;
		priv->tx_vq[i] = mc_q++;
		dpaa2_q = (struct dpaa2_queue *)priv->tx_vq[i];
		dpaa2_q->cscn = rte_malloc(NULL,
					   sizeof(struct qbman_result), 16);
		if (!dpaa2_q->cscn)
			goto fail_tx;
	}

	vq_id = 0;
	for (dist_idx = 0; dist_idx < priv->nb_rx_queues; dist_idx++) {
		mcq = (struct dpaa2_queue *)priv->rx_vq[vq_id];
		mcq->tc_index = dist_idx / num_rxqueue_per_tc;
		mcq->flow_id = dist_idx % num_rxqueue_per_tc;
		vq_id++;
	}

	return 0;
fail_tx:
	i -= 1;
	while (i >= 0) {
		dpaa2_q = (struct dpaa2_queue *)priv->tx_vq[i];
		rte_free(dpaa2_q->cscn);
		priv->tx_vq[i--] = NULL;
	}
	i = priv->nb_rx_queues;
fail:
	i -= 1;
	mc_q = priv->rx_vq[0];
	while (i >= 0) {
		dpaa2_q = (struct dpaa2_queue *)priv->rx_vq[i];
		dpaa2_free_dq_storage(dpaa2_q->q_storage);
		rte_free(dpaa2_q->q_storage);
		priv->rx_vq[i--] = NULL;
	}
	rte_free(mc_q);
	return -1;
}

static void
dpaa2_free_rx_tx_queues(struct rte_eth_dev *dev)
{
	struct dpaa2_dev_priv *priv = dev->data->dev_private;
	struct dpaa2_queue *dpaa2_q;
	int i;

	PMD_INIT_FUNC_TRACE();

	/* Queue allocation base */
	if (priv->rx_vq[0]) {
		/* cleaning up queue storage */
		for (i = 0; i < priv->nb_rx_queues; i++) {
			dpaa2_q = (struct dpaa2_queue *)priv->rx_vq[i];
			if (dpaa2_q->q_storage)
				rte_free(dpaa2_q->q_storage);
		}
		/* cleanup tx queue cscn */
		for (i = 0; i < priv->nb_tx_queues; i++) {
			dpaa2_q = (struct dpaa2_queue *)priv->tx_vq[i];
			rte_free(dpaa2_q->cscn);
		}
		/* free memory for all queues (RX+TX) */
		rte_free(priv->rx_vq[0]);
		priv->rx_vq[0] = NULL;
	}
}

static int
dpaa2_eth_dev_configure(struct rte_eth_dev *dev)
{
	struct dpaa2_dev_priv *priv = dev->data->dev_private;
	struct fsl_mc_io *dpni = priv->hw;
	struct rte_eth_conf *eth_conf = &dev->data->dev_conf;
	uint64_t rx_offloads = eth_conf->rxmode.offloads;
	uint64_t tx_offloads = eth_conf->txmode.offloads;
	int rx_l3_csum_offload = false;
	int rx_l4_csum_offload = false;
	int tx_l3_csum_offload = false;
	int tx_l4_csum_offload = false;
	int ret;

	PMD_INIT_FUNC_TRACE();

	/* Rx offloads validation */
	if (dev_rx_offloads_nodis & ~rx_offloads) {
		DPAA2_PMD_WARN(
		"Rx offloads non configurable - requested 0x%" PRIx64
		" ignored 0x%" PRIx64,
			rx_offloads, dev_rx_offloads_nodis);
	}

	/* Tx offloads validation */
	if (dev_tx_offloads_nodis & ~tx_offloads) {
		DPAA2_PMD_WARN(
		"Tx offloads non configurable - requested 0x%" PRIx64
		" ignored 0x%" PRIx64,
			tx_offloads, dev_tx_offloads_nodis);
	}

	if (rx_offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) {
		if (eth_conf->rxmode.max_rx_pkt_len <= DPAA2_MAX_RX_PKT_LEN) {
			ret = dpni_set_max_frame_length(dpni, CMD_PRI_LOW,
				priv->token, eth_conf->rxmode.max_rx_pkt_len);
			if (ret) {
				DPAA2_PMD_ERR(
					"Unable to set mtu. check config");
				return ret;
			}
		} else {
			return -1;
		}
	}

	if (eth_conf->rxmode.mq_mode == ETH_MQ_RX_RSS) {
		ret = dpaa2_setup_flow_dist(dev,
				eth_conf->rx_adv_conf.rss_conf.rss_hf);
		if (ret) {
			DPAA2_PMD_ERR("Unable to set flow distribution."
				      " Check queue config");
			return ret;
		}
	}

	if (rx_offloads & DEV_RX_OFFLOAD_IPV4_CKSUM)
		rx_l3_csum_offload = true;

	if ((rx_offloads & DEV_RX_OFFLOAD_UDP_CKSUM) ||
		(rx_offloads & DEV_RX_OFFLOAD_TCP_CKSUM))
		rx_l4_csum_offload = true;

	ret = dpni_set_offload(dpni, CMD_PRI_LOW, priv->token,
			       DPNI_OFF_RX_L3_CSUM, rx_l3_csum_offload);
	if (ret) {
		DPAA2_PMD_ERR("Error to set RX l3 csum:Error = %d", ret);
		return ret;
	}

	ret = dpni_set_offload(dpni, CMD_PRI_LOW, priv->token,
			       DPNI_OFF_RX_L4_CSUM, rx_l4_csum_offload);
	if (ret) {
		DPAA2_PMD_ERR("Error to set RX l4 csum:Error = %d", ret);
		return ret;
	}

	if (tx_offloads & DEV_TX_OFFLOAD_IPV4_CKSUM)
		tx_l3_csum_offload = true;

	if ((tx_offloads & DEV_TX_OFFLOAD_UDP_CKSUM) ||
		(tx_offloads & DEV_TX_OFFLOAD_TCP_CKSUM) ||
		(tx_offloads & DEV_TX_OFFLOAD_SCTP_CKSUM))
		tx_l4_csum_offload = true;

	ret = dpni_set_offload(dpni, CMD_PRI_LOW, priv->token,
			       DPNI_OFF_TX_L3_CSUM, tx_l3_csum_offload);
	if (ret) {
		DPAA2_PMD_ERR("Error to set TX l3 csum:Error = %d", ret);
		return ret;
	}

	ret = dpni_set_offload(dpni, CMD_PRI_LOW, priv->token,
			       DPNI_OFF_TX_L4_CSUM, tx_l4_csum_offload);
	if (ret) {
		DPAA2_PMD_ERR("Error to set TX l4 csum:Error = %d", ret);
		return ret;
	}

	/* Enabling hash results in FD requires setting DPNI_FLCTYPE_HASH in
	 * dpni_set_offload API. Setting this FLCTYPE for DPNI sets the FD[SC]
	 * to 0 for LS2 in the hardware thus disabling data/annotation
	 * stashing. For LX2 this is fixed in hardware and thus hash result and
	 * parse results can be received in FD using this option.
	 */
	if (dpaa2_svr_family == SVR_LX2160A) {
		ret = dpni_set_offload(dpni, CMD_PRI_LOW, priv->token,
				       DPNI_FLCTYPE_HASH, true);
		if (ret) {
			DPAA2_PMD_ERR("Error setting FLCTYPE: Err = %d", ret);
			return ret;
		}
	}

	if (rx_offloads & DEV_RX_OFFLOAD_VLAN_FILTER)
		dpaa2_vlan_offload_set(dev, ETH_VLAN_FILTER_MASK);

	/* update the current status */
	dpaa2_dev_link_update(dev, 0);

	return 0;
}
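
/*
 * Illustrative configuration sketch (application side, generic ethdev API):
 * the checks above mean RSS and jumbo-frame support are driven entirely by
 * what the application places in rte_eth_conf before rte_eth_dev_configure().
 *
 *	struct rte_eth_conf conf = {
 *		.rxmode = {
 *			.mq_mode = ETH_MQ_RX_RSS,
 *			.max_rx_pkt_len = 9000,
 *			.offloads = DEV_RX_OFFLOAD_JUMBO_FRAME |
 *				    DEV_RX_OFFLOAD_IPV4_CKSUM,
 *		},
 *		.rx_adv_conf.rss_conf.rss_hf = ETH_RSS_IP,
 *	};
 *	ret = rte_eth_dev_configure(port_id, nb_rxq, nb_txq, &conf);
 */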

/* Function to setup RX flow information. It contains traffic class ID,
 * flow ID, destination configuration etc.
 */
static int
dpaa2_dev_rx_queue_setup(struct rte_eth_dev *dev,
			 uint16_t rx_queue_id,
			 uint16_t nb_rx_desc __rte_unused,
			 unsigned int socket_id __rte_unused,
			 const struct rte_eth_rxconf *rx_conf __rte_unused,
			 struct rte_mempool *mb_pool)
{
	struct dpaa2_dev_priv *priv = dev->data->dev_private;
	struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
	struct dpaa2_queue *dpaa2_q;
	struct dpni_queue cfg;
	uint8_t options = 0;
	uint8_t flow_id;
	uint32_t bpid;
	int ret;

	PMD_INIT_FUNC_TRACE();

	DPAA2_PMD_DEBUG("dev =%p, queue =%d, pool = %p, conf =%p",
			dev, rx_queue_id, mb_pool, rx_conf);

	if (!priv->bp_list || priv->bp_list->mp != mb_pool) {
		bpid = mempool_to_bpid(mb_pool);
		ret = dpaa2_attach_bp_list(priv,
					   rte_dpaa2_bpid_info[bpid].bp_list);
		if (ret)
			return ret;
	}
	dpaa2_q = (struct dpaa2_queue *)priv->rx_vq[rx_queue_id];
	dpaa2_q->mb_pool = mb_pool; /**< mbuf pool to populate RX ring. */
	dpaa2_q->bp_array = rte_dpaa2_bpid_info;

	/* Get the flow id from given VQ id */
	flow_id = rx_queue_id % priv->nb_rx_queues;
	memset(&cfg, 0, sizeof(struct dpni_queue));

	options = options | DPNI_QUEUE_OPT_USER_CTX;
	cfg.user_context = (size_t)(dpaa2_q);

	/* if ls2088 or rev2 device, enable the stashing */
	if ((dpaa2_svr_family & 0xffff0000) != SVR_LS2080A) {
		options |= DPNI_QUEUE_OPT_FLC;
		cfg.flc.stash_control = true;
		cfg.flc.value &= 0xFFFFFFFFFFFFFFC0;
		/* 00 00 00 - last 6 bit represent annotation, context
		 * stashing, data stashing setting 01 01 00 (0x14)
		 * (in following order -> DS AS CS)
		 * to enable 1 line data, 1 line annotation.
		 * For LX2, this setting should be 01 00 00 (0x10)
		 */
		if ((dpaa2_svr_family & 0xffff0000) == SVR_LX2160A)
			cfg.flc.value |= 0x10;
		else
			cfg.flc.value |= 0x14;
	}
	ret = dpni_set_queue(dpni, CMD_PRI_LOW, priv->token, DPNI_QUEUE_RX,
			     dpaa2_q->tc_index, flow_id, options, &cfg);
	if (ret) {
		DPAA2_PMD_ERR("Error in setting the rx flow: = %d", ret);
		return -1;
	}

	if (!(priv->flags & DPAA2_RX_TAILDROP_OFF)) {
		struct dpni_taildrop taildrop;

		taildrop.enable = 1;
		/* enabling per rx queue congestion control */
		taildrop.threshold = CONG_THRESHOLD_RX_Q;
		taildrop.units = DPNI_CONGESTION_UNIT_BYTES;
		taildrop.oal = CONG_RX_OAL;
		DPAA2_PMD_DEBUG("Enabling Early Drop on queue = %d",
				rx_queue_id);
		ret = dpni_set_taildrop(dpni, CMD_PRI_LOW, priv->token,
					DPNI_CP_QUEUE, DPNI_QUEUE_RX,
					dpaa2_q->tc_index, flow_id, &taildrop);
		if (ret) {
			DPAA2_PMD_ERR("Error in setting taildrop. err=(%d)",
				      ret);
			return -1;
		}
	}

	dev->data->rx_queues[rx_queue_id] = dpaa2_q;
	return 0;
}

static int
dpaa2_dev_tx_queue_setup(struct rte_eth_dev *dev,
			 uint16_t tx_queue_id,
			 uint16_t nb_tx_desc __rte_unused,
			 unsigned int socket_id __rte_unused,
			 const struct rte_eth_txconf *tx_conf __rte_unused)
{
	struct dpaa2_dev_priv *priv = dev->data->dev_private;
	struct dpaa2_queue *dpaa2_q = (struct dpaa2_queue *)
		priv->tx_vq[tx_queue_id];
	struct fsl_mc_io *dpni = priv->hw;
	struct dpni_queue tx_conf_cfg;
	struct dpni_queue tx_flow_cfg;
	uint8_t options = 0, flow_id;
	uint32_t tc_id;
	int ret;

	PMD_INIT_FUNC_TRACE();

	/* Return if queue already configured */
	if (dpaa2_q->flow_id != 0xffff) {
		dev->data->tx_queues[tx_queue_id] = dpaa2_q;
		return 0;
	}

	memset(&tx_conf_cfg, 0, sizeof(struct dpni_queue));
	memset(&tx_flow_cfg, 0, sizeof(struct dpni_queue));

	tc_id = tx_queue_id;
	flow_id = 0;

	ret = dpni_set_queue(dpni, CMD_PRI_LOW, priv->token, DPNI_QUEUE_TX,
			     tc_id, flow_id, options, &tx_flow_cfg);
	if (ret) {
		DPAA2_PMD_ERR("Error in setting the tx flow: "
			      "tc_id=%d, flow=%d err=%d",
			      tc_id, flow_id, ret);
		return -1;
	}

	dpaa2_q->flow_id = flow_id;

	if (tx_queue_id == 0) {
		/* Set tx-conf and error configuration */
		ret = dpni_set_tx_confirmation_mode(dpni, CMD_PRI_LOW,
						    priv->token,
						    DPNI_CONF_DISABLE);
		if (ret) {
			DPAA2_PMD_ERR("Error in set tx conf mode settings: "
				      "err=%d", ret);
			return -1;
		}
	}
	dpaa2_q->tc_index = tc_id;

	if (!(priv->flags & DPAA2_TX_CGR_OFF)) {
		struct dpni_congestion_notification_cfg cong_notif_cfg;

		cong_notif_cfg.units = DPNI_CONGESTION_UNIT_FRAMES;
		cong_notif_cfg.threshold_entry = CONG_ENTER_TX_THRESHOLD;
		/* Notify that the queue is not congested when the data in
		 * the queue is below this threshold.
		 */
		cong_notif_cfg.threshold_exit = CONG_EXIT_TX_THRESHOLD;
		cong_notif_cfg.message_ctx = 0;
		cong_notif_cfg.message_iova =
			(size_t)DPAA2_VADDR_TO_IOVA(dpaa2_q->cscn);
		cong_notif_cfg.dest_cfg.dest_type = DPNI_DEST_NONE;
		cong_notif_cfg.notification_mode =
					DPNI_CONG_OPT_WRITE_MEM_ON_ENTER |
					DPNI_CONG_OPT_WRITE_MEM_ON_EXIT |
					DPNI_CONG_OPT_COHERENT_WRITE;
		cong_notif_cfg.cg_point = DPNI_CP_QUEUE;

		ret = dpni_set_congestion_notification(dpni, CMD_PRI_LOW,
						       priv->token,
						       DPNI_QUEUE_TX,
						       tc_id,
						       &cong_notif_cfg);
		if (ret) {
			DPAA2_PMD_ERR(
			   "Error in setting tx congestion notification: "
			   "err=%d", ret);
			return -ret;
		}
	}
	dpaa2_q->cb_eqresp_free = dpaa2_dev_free_eqresp_buf;
	dev->data->tx_queues[tx_queue_id] = dpaa2_q;
	return 0;
}

static void
dpaa2_dev_rx_queue_release(void *q __rte_unused)
{
	PMD_INIT_FUNC_TRACE();
}

static void
dpaa2_dev_tx_queue_release(void *q __rte_unused)
{
	PMD_INIT_FUNC_TRACE();
}

static uint32_t
dpaa2_dev_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id)
{
	int32_t ret;
	struct dpaa2_dev_priv *priv = dev->data->dev_private;
	struct dpaa2_queue *dpaa2_q;
	struct qbman_swp *swp;
	struct qbman_fq_query_np_rslt state;
	uint32_t frame_cnt = 0;

	PMD_INIT_FUNC_TRACE();

	if (unlikely(!DPAA2_PER_LCORE_DPIO)) {
		ret = dpaa2_affine_qbman_swp();
		if (ret) {
			DPAA2_PMD_ERR("Failure in affining portal");
			return -EINVAL;
		}
	}
	swp = DPAA2_PER_LCORE_PORTAL;

	dpaa2_q = (struct dpaa2_queue *)priv->rx_vq[rx_queue_id];

	if (qbman_fq_query_state(swp, dpaa2_q->fqid, &state) == 0) {
		frame_cnt = qbman_fq_state_frame_count(&state);
		DPAA2_PMD_DEBUG("RX frame count for q(%d) is %u",
				rx_queue_id, frame_cnt);
	}
	return frame_cnt;
}
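
/*
 * Illustrative sketch (generic ethdev API): the frame count reported above
 * can be polled by an application to gauge per-queue backlog. The watermark
 * and the reaction below are hypothetical application policy.
 *
 *	uint32_t backlog = rte_eth_rx_queue_count(port_id, queue_id);
 *	if (backlog > APP_HIGH_WATERMARK)
 *		scale_up_workers();
 */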

static const uint32_t *
dpaa2_supported_ptypes_get(struct rte_eth_dev *dev)
{
	static const uint32_t ptypes[] = {
		/* todo: add more types */
		RTE_PTYPE_L2_ETHER,
		RTE_PTYPE_L3_IPV4,
		RTE_PTYPE_L3_IPV4_EXT,
		RTE_PTYPE_L3_IPV6,
		RTE_PTYPE_L3_IPV6_EXT,
		RTE_PTYPE_L4_TCP,
		RTE_PTYPE_L4_UDP,
		RTE_PTYPE_L4_SCTP,
		RTE_PTYPE_L4_ICMP,
		RTE_PTYPE_UNKNOWN
	};

	if (dev->rx_pkt_burst == dpaa2_dev_prefetch_rx ||
		dev->rx_pkt_burst == dpaa2_dev_loopback_rx)
		return ptypes;
	return NULL;
}

/**
 * Dpaa2 link Interrupt handler
 *
 * @param param
 *  The address of parameter (struct rte_eth_dev *) registered before.
 *
 * @return
 *  void
 */
static void
dpaa2_interrupt_handler(void *param)
{
	struct rte_eth_dev *dev = param;
	struct dpaa2_dev_priv *priv = dev->data->dev_private;
	struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
	int ret;
	int irq_index = DPNI_IRQ_INDEX;
	unsigned int status = 0, clear = 0;

	PMD_INIT_FUNC_TRACE();

	if (dpni == NULL) {
		DPAA2_PMD_ERR("dpni is NULL");
		return;
	}

	ret = dpni_get_irq_status(dpni, CMD_PRI_LOW, priv->token,
				  irq_index, &status);
	if (unlikely(ret)) {
		DPAA2_PMD_ERR("Can't get irq status (err %d)", ret);
		clear = 0xffffffff;
		goto out;
	}

	if (status & DPNI_IRQ_EVENT_LINK_CHANGED) {
		clear = DPNI_IRQ_EVENT_LINK_CHANGED;
		dpaa2_dev_link_update(dev, 0);
		/* calling all the apps registered for link status event */
		_rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC,
					      NULL);
	}
out:
	ret = dpni_clear_irq_status(dpni, CMD_PRI_LOW, priv->token,
				    irq_index, clear);
	if (unlikely(ret))
		DPAA2_PMD_ERR("Can't clear irq status (err %d)", ret);
}

static int
dpaa2_eth_setup_irqs(struct rte_eth_dev *dev, int enable)
{
	int err = 0;
	struct dpaa2_dev_priv *priv = dev->data->dev_private;
	struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
	int irq_index = DPNI_IRQ_INDEX;
	unsigned int mask = DPNI_IRQ_EVENT_LINK_CHANGED;

	PMD_INIT_FUNC_TRACE();

	err = dpni_set_irq_mask(dpni, CMD_PRI_LOW, priv->token,
				irq_index, mask);
	if (err < 0) {
		DPAA2_PMD_ERR("Error: dpni_set_irq_mask():%d (%s)", err,
			      strerror(-err));
		return err;
	}

	err = dpni_set_irq_enable(dpni, CMD_PRI_LOW, priv->token,
				  irq_index, enable);
	if (err < 0)
		DPAA2_PMD_ERR("Error: dpni_set_irq_enable():%d (%s)", err,
			      strerror(-err));

	return err;
}

static int
dpaa2_dev_start(struct rte_eth_dev *dev)
{
	struct rte_device *rdev = dev->device;
	struct rte_dpaa2_device *dpaa2_dev;
	struct rte_eth_dev_data *data = dev->data;
	struct dpaa2_dev_priv *priv = data->dev_private;
	struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
	struct dpni_queue cfg;
	struct dpni_error_cfg err_cfg;
	uint16_t qdid;
	struct dpni_queue_id qid;
	struct dpaa2_queue *dpaa2_q;
	int ret, i;
	struct rte_intr_handle *intr_handle;

	dpaa2_dev = container_of(rdev, struct rte_dpaa2_device, device);
	intr_handle = &dpaa2_dev->intr_handle;

	PMD_INIT_FUNC_TRACE();

	ret = dpni_enable(dpni, CMD_PRI_LOW, priv->token);
	if (ret) {
		DPAA2_PMD_ERR("Failure in enabling dpni %d device: err=%d",
			      priv->hw_id, ret);
		return ret;
	}

	/* Power up the phy. Needed to make the link go UP */
	dpaa2_dev_set_link_up(dev);

	ret = dpni_get_qdid(dpni, CMD_PRI_LOW, priv->token,
			    DPNI_QUEUE_TX, &qdid);
	if (ret) {
		DPAA2_PMD_ERR("Error in getting qdid: err=%d", ret);
		return ret;
	}
	priv->qdid = qdid;

	for (i = 0; i < data->nb_rx_queues; i++) {
		dpaa2_q = (struct dpaa2_queue *)data->rx_queues[i];
		ret = dpni_get_queue(dpni, CMD_PRI_LOW, priv->token,
				     DPNI_QUEUE_RX, dpaa2_q->tc_index,
				     dpaa2_q->flow_id, &cfg, &qid);
		if (ret) {
			DPAA2_PMD_ERR("Error in getting flow information: "
				      "err=%d", ret);
			return ret;
		}
		dpaa2_q->fqid = qid.fqid;
	}

	/* checksum errors, send them to normal path and set it in annotation */
	err_cfg.errors = DPNI_ERROR_L3CE | DPNI_ERROR_L4CE;
	err_cfg.errors |= DPNI_ERROR_PHE;

	err_cfg.error_action = DPNI_ERROR_ACTION_CONTINUE;
	err_cfg.set_frame_annotation = true;

	ret = dpni_set_errors_behavior(dpni, CMD_PRI_LOW,
				       priv->token, &err_cfg);
	if (ret) {
		DPAA2_PMD_ERR("Error to dpni_set_errors_behavior: code = %d",
			      ret);
		return ret;
	}

	/* if the interrupts were configured on this devices */
	if (intr_handle && (intr_handle->fd) &&
	    (dev->data->dev_conf.intr_conf.lsc != 0)) {
		/* Registering LSC interrupt handler */
		rte_intr_callback_register(intr_handle,
					   dpaa2_interrupt_handler,
					   (void *)dev);

		/* enable vfio intr/eventfd mapping
		 * Interrupt index 0 is required, so we can not use
		 * rte_intr_enable.
		 */
		rte_dpaa2_intr_enable(intr_handle, DPNI_IRQ_INDEX);

		/* enable dpni_irqs */
		dpaa2_eth_setup_irqs(dev, 1);
	}

	/* Change the tx burst function if ordered queues are used */
	if (priv->en_ordered)
		dev->tx_pkt_burst = dpaa2_dev_tx_ordered;

	return 0;
}

/**
 * This routine disables all traffic on the adapter by issuing a
 * global reset on the MAC.
 */
static void
dpaa2_dev_stop(struct rte_eth_dev *dev)
{
	struct dpaa2_dev_priv *priv = dev->data->dev_private;
	struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
	int ret;
	struct rte_eth_link link;
	struct rte_intr_handle *intr_handle = dev->intr_handle;

	PMD_INIT_FUNC_TRACE();

	/* reset interrupt callback */
	if (intr_handle && (intr_handle->fd) &&
	    (dev->data->dev_conf.intr_conf.lsc != 0)) {
		/* disable dpni irqs */
		dpaa2_eth_setup_irqs(dev, 0);

		/* disable vfio intr before callback unregister */
		rte_dpaa2_intr_disable(intr_handle, DPNI_IRQ_INDEX);

		/* Unregistering LSC interrupt handler */
		rte_intr_callback_unregister(intr_handle,
					     dpaa2_interrupt_handler,
					     (void *)dev);
	}

	dpaa2_dev_set_link_down(dev);

	ret = dpni_disable(dpni, CMD_PRI_LOW, priv->token);
	if (ret) {
		DPAA2_PMD_ERR("Failure (ret %d) in disabling dpni %d dev",
			      ret, priv->hw_id);
		return;
	}

	/* clear the recorded link status */
	memset(&link, 0, sizeof(link));
	rte_eth_linkstatus_set(dev, &link);
}

static void
dpaa2_dev_close(struct rte_eth_dev *dev)
{
	struct dpaa2_dev_priv *priv = dev->data->dev_private;
	struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
	int ret;
	struct rte_eth_link link;

	PMD_INIT_FUNC_TRACE();

	dpaa2_flow_clean(dev);

	/* Clean the device first */
	ret = dpni_reset(dpni, CMD_PRI_LOW, priv->token);
	if (ret) {
		DPAA2_PMD_ERR("Failure cleaning dpni device: err=%d", ret);
		return;
	}

	memset(&link, 0, sizeof(link));
	rte_eth_linkstatus_set(dev, &link);
}

static void
dpaa2_dev_promiscuous_enable(
		struct rte_eth_dev *dev)
{
	int ret;
	struct dpaa2_dev_priv *priv = dev->data->dev_private;
	struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;

	PMD_INIT_FUNC_TRACE();

	if (dpni == NULL) {
		DPAA2_PMD_ERR("dpni is NULL");
		return;
	}

	ret = dpni_set_unicast_promisc(dpni, CMD_PRI_LOW, priv->token, true);
	if (ret < 0)
		DPAA2_PMD_ERR("Unable to enable U promisc mode %d", ret);

	ret = dpni_set_multicast_promisc(dpni, CMD_PRI_LOW, priv->token, true);
	if (ret < 0)
		DPAA2_PMD_ERR("Unable to enable M promisc mode %d", ret);
}

static void
dpaa2_dev_promiscuous_disable(
		struct rte_eth_dev *dev)
{
	int ret;
	struct dpaa2_dev_priv *priv = dev->data->dev_private;
	struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;

	PMD_INIT_FUNC_TRACE();

	if (dpni == NULL) {
		DPAA2_PMD_ERR("dpni is NULL");
		return;
	}

	ret = dpni_set_unicast_promisc(dpni, CMD_PRI_LOW, priv->token, false);
	if (ret < 0)
		DPAA2_PMD_ERR("Unable to disable U promisc mode %d", ret);

	if (dev->data->all_multicast == 0) {
		ret = dpni_set_multicast_promisc(dpni, CMD_PRI_LOW,
						 priv->token, false);
		if (ret < 0)
			DPAA2_PMD_ERR("Unable to disable M promisc mode %d",
				      ret);
	}
}

static void
dpaa2_dev_allmulticast_enable(
		struct rte_eth_dev *dev)
{
	int ret;
	struct dpaa2_dev_priv *priv = dev->data->dev_private;
	struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;

	PMD_INIT_FUNC_TRACE();

	if (dpni == NULL) {
		DPAA2_PMD_ERR("dpni is NULL");
		return;
	}

	ret = dpni_set_multicast_promisc(dpni, CMD_PRI_LOW, priv->token, true);
	if (ret < 0)
		DPAA2_PMD_ERR("Unable to enable multicast mode %d", ret);
}

static void
dpaa2_dev_allmulticast_disable(struct rte_eth_dev *dev)
{
	int ret;
	struct dpaa2_dev_priv *priv = dev->data->dev_private;
	struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;

	PMD_INIT_FUNC_TRACE();

	if (dpni == NULL) {
		DPAA2_PMD_ERR("dpni is NULL");
		return;
	}

	/* must remain on for all promiscuous */
	if (dev->data->promiscuous == 1)
		return;

	ret = dpni_set_multicast_promisc(dpni, CMD_PRI_LOW, priv->token, false);
	if (ret < 0)
		DPAA2_PMD_ERR("Unable to disable multicast mode %d", ret);
}

static int
dpaa2_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
{
	int ret;
	struct dpaa2_dev_priv *priv = dev->data->dev_private;
	struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
	uint32_t frame_size = mtu + ETHER_HDR_LEN + ETHER_CRC_LEN
				+ VLAN_TAG_SIZE;

	PMD_INIT_FUNC_TRACE();

	if (dpni == NULL) {
		DPAA2_PMD_ERR("dpni is NULL");
		return -EINVAL;
	}

	/* check that mtu is within the allowed range */
	if ((mtu < ETHER_MIN_MTU) || (frame_size > DPAA2_MAX_RX_PKT_LEN))
		return -EINVAL;

	if (frame_size > ETHER_MAX_LEN)
		dev->data->dev_conf.rxmode.offloads |=
						DEV_RX_OFFLOAD_JUMBO_FRAME;
	else
		dev->data->dev_conf.rxmode.offloads &=
						~DEV_RX_OFFLOAD_JUMBO_FRAME;

	dev->data->dev_conf.rxmode.max_rx_pkt_len = frame_size;

	/* Set the Max Rx frame length as 'mtu' +
	 * Maximum Ethernet header length
	 */
	ret = dpni_set_max_frame_length(dpni, CMD_PRI_LOW, priv->token,
					frame_size);
	if (ret) {
		DPAA2_PMD_ERR("Setting the max frame length failed");
		return -1;
	}
	DPAA2_PMD_INFO("MTU configured for the device: %d", mtu);
	return 0;
}
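
/*
 * Illustrative sketch (generic ethdev API): MTU updates funnel through
 * rte_eth_dev_set_mtu(); a value whose resulting frame size exceeds
 * DPAA2_MAX_RX_PKT_LEN is rejected with -EINVAL by the callback above.
 *
 *	if (rte_eth_dev_set_mtu(port_id, 9000) != 0)
 *		printf("MTU 9000 rejected by the port\n");
 */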

static int
dpaa2_dev_add_mac_addr(struct rte_eth_dev *dev,
		       struct ether_addr *addr,
		       __rte_unused uint32_t index,
		       __rte_unused uint32_t pool)
{
	int ret;
	struct dpaa2_dev_priv *priv = dev->data->dev_private;
	struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;

	PMD_INIT_FUNC_TRACE();

	if (dpni == NULL) {
		DPAA2_PMD_ERR("dpni is NULL");
		return -1;
	}

	ret = dpni_add_mac_addr(dpni, CMD_PRI_LOW,
				priv->token, addr->addr_bytes);
	if (ret)
		DPAA2_PMD_ERR(
			"error: Adding the MAC ADDR failed: err = %d", ret);
	return 0;
}

static void
dpaa2_dev_remove_mac_addr(struct rte_eth_dev *dev,
			  uint32_t index)
{
	int ret;
	struct dpaa2_dev_priv *priv = dev->data->dev_private;
	struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
	struct rte_eth_dev_data *data = dev->data;
	struct ether_addr *macaddr;

	PMD_INIT_FUNC_TRACE();

	macaddr = &data->mac_addrs[index];

	if (dpni == NULL) {
		DPAA2_PMD_ERR("dpni is NULL");
		return;
	}

	ret = dpni_remove_mac_addr(dpni, CMD_PRI_LOW,
				   priv->token, macaddr->addr_bytes);
	if (ret)
		DPAA2_PMD_ERR(
			"error: Removing the MAC ADDR failed: err = %d", ret);
}

static int
dpaa2_dev_set_mac_addr(struct rte_eth_dev *dev,
		       struct ether_addr *addr)
{
	int ret;
	struct dpaa2_dev_priv *priv = dev->data->dev_private;
	struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;

	PMD_INIT_FUNC_TRACE();

	if (dpni == NULL) {
		DPAA2_PMD_ERR("dpni is NULL");
		return -EINVAL;
	}

	ret = dpni_set_primary_mac_addr(dpni, CMD_PRI_LOW,
					priv->token, addr->addr_bytes);

	if (ret)
		DPAA2_PMD_ERR(
			"error: Setting the MAC ADDR failed %d", ret);

	return ret;
}

static int
dpaa2_dev_stats_get(struct rte_eth_dev *dev,
		    struct rte_eth_stats *stats)
{
	struct dpaa2_dev_priv *priv = dev->data->dev_private;
	struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
	int32_t retcode;
	uint8_t page0 = 0, page1 = 1, page2 = 2;
	union dpni_statistics value;
	int i;
	struct dpaa2_queue *dpaa2_rxq, *dpaa2_txq;

	memset(&value, 0, sizeof(union dpni_statistics));

	PMD_INIT_FUNC_TRACE();

	if (!dpni) {
		DPAA2_PMD_ERR("dpni is NULL");
		return -EINVAL;
	}

	if (!stats) {
		DPAA2_PMD_ERR("stats is NULL");
		return -EINVAL;
	}

	/* Get Counters from page_0 */
	retcode = dpni_get_statistics(dpni, CMD_PRI_LOW, priv->token,
				      page0, 0, &value);
	if (retcode)
		goto err;

	stats->ipackets = value.page_0.ingress_all_frames;
	stats->ibytes = value.page_0.ingress_all_bytes;

	/* Get Counters from page_1 */
	retcode = dpni_get_statistics(dpni, CMD_PRI_LOW, priv->token,
				      page1, 0, &value);
	if (retcode)
		goto err;

	stats->opackets = value.page_1.egress_all_frames;
	stats->obytes = value.page_1.egress_all_bytes;

	/* Get Counters from page_2 */
	retcode = dpni_get_statistics(dpni, CMD_PRI_LOW, priv->token,
				      page2, 0, &value);
	if (retcode)
		goto err;

	/* Ingress drop frame count due to configured rules */
	stats->ierrors = value.page_2.ingress_filtered_frames;
	/* Ingress drop frame count due to error */
	stats->ierrors += value.page_2.ingress_discarded_frames;

	stats->oerrors = value.page_2.egress_discarded_frames;
	stats->imissed = value.page_2.ingress_nobuffer_discards;

	/* Fill in per queue stats */
	for (i = 0; (i < RTE_ETHDEV_QUEUE_STAT_CNTRS) &&
		(i < priv->nb_rx_queues || i < priv->nb_tx_queues); ++i) {
		dpaa2_rxq = (struct dpaa2_queue *)priv->rx_vq[i];
		dpaa2_txq = (struct dpaa2_queue *)priv->tx_vq[i];
		if (dpaa2_rxq)
			stats->q_ipackets[i] = dpaa2_rxq->rx_pkts;
		if (dpaa2_txq)
			stats->q_opackets[i] = dpaa2_txq->tx_pkts;

		/* Byte counting is not implemented */
		stats->q_ibytes[i] = 0;
		stats->q_obytes[i] = 0;
	}

	return 0;

err:
	DPAA2_PMD_ERR("Operation not completed:Error Code = %d", retcode);
	return retcode;
}
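
/*
 * Illustrative sketch (generic ethdev API): the aggregate and per-queue
 * counters filled in above are read with rte_eth_stats_get().
 *
 *	struct rte_eth_stats st;
 *	if (rte_eth_stats_get(port_id, &st) == 0)
 *		printf("rx %"PRIu64" pkts, missed %"PRIu64"\n",
 *		       st.ipackets, st.imissed);
 */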

static int
dpaa2_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
		     unsigned int n)
{
	struct dpaa2_dev_priv *priv = dev->data->dev_private;
	struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
	int32_t retcode;
	union dpni_statistics value[3] = {};
	unsigned int i = 0, num = RTE_DIM(dpaa2_xstats_strings);

	if (n < num)
		return num;

	if (xstats == NULL)
		return 0;

	/* Get Counters from page_0 */
	retcode = dpni_get_statistics(dpni, CMD_PRI_LOW, priv->token,
				      0, 0, &value[0]);
	if (retcode)
		goto err;

	/* Get Counters from page_1 */
	retcode = dpni_get_statistics(dpni, CMD_PRI_LOW, priv->token,
				      1, 0, &value[1]);
	if (retcode)
		goto err;

	/* Get Counters from page_2 */
	retcode = dpni_get_statistics(dpni, CMD_PRI_LOW, priv->token,
				      2, 0, &value[2]);
	if (retcode)
		goto err;

	for (i = 0; i < num; i++) {
		xstats[i].id = i;
		xstats[i].value = value[dpaa2_xstats_strings[i].page_id].
			raw.counter[dpaa2_xstats_strings[i].stats_id];
	}
	return i;
err:
	DPAA2_PMD_ERR("Error in obtaining extended stats (%d)", retcode);
	return retcode;
}

static int
dpaa2_xstats_get_names(__rte_unused struct rte_eth_dev *dev,
		       struct rte_eth_xstat_name *xstats_names,
		       unsigned int limit)
{
	unsigned int i, stat_cnt = RTE_DIM(dpaa2_xstats_strings);

	if (limit < stat_cnt)
		return stat_cnt;

	if (xstats_names != NULL)
		for (i = 0; i < stat_cnt; i++)
			strlcpy(xstats_names[i].name,
				dpaa2_xstats_strings[i].name,
				sizeof(xstats_names[i].name));

	return stat_cnt;
}

static int
dpaa2_xstats_get_by_id(struct rte_eth_dev *dev, const uint64_t *ids,
		       uint64_t *values, unsigned int n)
{
	unsigned int i, stat_cnt = RTE_DIM(dpaa2_xstats_strings);
	uint64_t values_copy[stat_cnt];

	if (!ids) {
		struct dpaa2_dev_priv *priv = dev->data->dev_private;
		struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
		int32_t retcode;
		union dpni_statistics value[3] = {};

		if (n < stat_cnt)
			return stat_cnt;

		if (!values)
			return 0;

		/* Get Counters from page_0 */
		retcode = dpni_get_statistics(dpni, CMD_PRI_LOW, priv->token,
					      0, 0, &value[0]);
		if (retcode)
			return 0;

		/* Get Counters from page_1 */
		retcode = dpni_get_statistics(dpni, CMD_PRI_LOW, priv->token,
					      1, 0, &value[1]);
		if (retcode)
			return 0;

		/* Get Counters from page_2 */
		retcode = dpni_get_statistics(dpni, CMD_PRI_LOW, priv->token,
					      2, 0, &value[2]);
		if (retcode)
			return 0;

		for (i = 0; i < stat_cnt; i++) {
			values[i] = value[dpaa2_xstats_strings[i].page_id].
				raw.counter[dpaa2_xstats_strings[i].stats_id];
		}
		return stat_cnt;
	}

	dpaa2_xstats_get_by_id(dev, NULL, values_copy, stat_cnt);

	for (i = 0; i < n; i++) {
		if (ids[i] >= stat_cnt) {
			DPAA2_PMD_ERR("xstats id value isn't valid");
			return -1;
		}
		values[i] = values_copy[ids[i]];
	}
	return n;
}

static int
dpaa2_xstats_get_names_by_id(
	struct rte_eth_dev *dev,
	struct rte_eth_xstat_name *xstats_names,
	const uint64_t *ids,
	unsigned int limit)
{
	unsigned int i, stat_cnt = RTE_DIM(dpaa2_xstats_strings);
	struct rte_eth_xstat_name xstats_names_copy[stat_cnt];

	if (!ids)
		return dpaa2_xstats_get_names(dev, xstats_names, limit);

	dpaa2_xstats_get_names(dev, xstats_names_copy, limit);

	for (i = 0; i < limit; i++) {
		if (ids[i] >= stat_cnt) {
			DPAA2_PMD_ERR("xstats id value isn't valid");
			return -1;
		}
		strcpy(xstats_names[i].name, xstats_names_copy[ids[i]].name);
	}
	return limit;
}

static void
dpaa2_dev_stats_reset(struct rte_eth_dev *dev)
{
	struct dpaa2_dev_priv *priv = dev->data->dev_private;
	struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
	int32_t retcode;
	int i;
	struct dpaa2_queue *dpaa2_q;

	PMD_INIT_FUNC_TRACE();

	if (dpni == NULL) {
		DPAA2_PMD_ERR("dpni is NULL");
		return;
	}

	retcode = dpni_reset_statistics(dpni, CMD_PRI_LOW, priv->token);
	if (retcode)
		goto error;

	/* Reset the per queue stats in dpaa2_queue structure */
	for (i = 0; i < priv->nb_rx_queues; i++) {
		dpaa2_q = (struct dpaa2_queue *)priv->rx_vq[i];
		if (dpaa2_q)
			dpaa2_q->rx_pkts = 0;
	}

	for (i = 0; i < priv->nb_tx_queues; i++) {
		dpaa2_q = (struct dpaa2_queue *)priv->tx_vq[i];
		if (dpaa2_q)
			dpaa2_q->tx_pkts = 0;
	}

	return;

error:
	DPAA2_PMD_ERR("Operation not completed:Error Code = %d", retcode);
	return;
}

/* return 0 means link status changed, -1 means not changed */
static int
dpaa2_dev_link_update(struct rte_eth_dev *dev,
		      int wait_to_complete __rte_unused)
{
	int ret;
	struct dpaa2_dev_priv *priv = dev->data->dev_private;
	struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
	struct rte_eth_link link;
	struct dpni_link_state state = {0};

	if (dpni == NULL) {
		DPAA2_PMD_ERR("dpni is NULL");
		return 0;
	}

	ret = dpni_get_link_state(dpni, CMD_PRI_LOW, priv->token, &state);
	if (ret < 0) {
		DPAA2_PMD_DEBUG("error: dpni_get_link_state %d", ret);
		return -1;
	}

	memset(&link, 0, sizeof(struct rte_eth_link));
	link.link_status = state.up;
	link.link_speed = state.rate;

	if (state.options & DPNI_LINK_OPT_HALF_DUPLEX)
		link.link_duplex = ETH_LINK_HALF_DUPLEX;
	else
		link.link_duplex = ETH_LINK_FULL_DUPLEX;

	ret = rte_eth_linkstatus_set(dev, &link);
	if (ret == -1)
		DPAA2_PMD_DEBUG("No change in status");
	else
		DPAA2_PMD_INFO("Port %d Link is %s\n", dev->data->port_id,
			       link.link_status ? "Up" : "Down");

	return ret;
}

/**
 * Toggle the DPNI to enable, if not already enabled.
 * This is not strictly PHY up/down - it is more of logical toggling.
 */
static int
dpaa2_dev_set_link_up(struct rte_eth_dev *dev)
{
	int ret = -EINVAL;
	struct dpaa2_dev_priv *priv;
	struct fsl_mc_io *dpni;
	int en = 0;
	struct dpni_link_state state = {0};

	priv = dev->data->dev_private;
	dpni = (struct fsl_mc_io *)priv->hw;

	if (dpni == NULL) {
		DPAA2_PMD_ERR("dpni is NULL");
		return ret;
	}

	/* Check if DPNI is currently enabled */
	ret = dpni_is_enabled(dpni, CMD_PRI_LOW, priv->token, &en);
	if (ret) {
		/* Unable to obtain dpni status; Not continuing */
		DPAA2_PMD_ERR("Interface Link UP failed (%d)", ret);
		return -EINVAL;
	}

	/* Enable link if not already enabled */
	if (!en) {
		ret = dpni_enable(dpni, CMD_PRI_LOW, priv->token);
		if (ret) {
			DPAA2_PMD_ERR("Interface Link UP failed (%d)", ret);
			return -EINVAL;
		}
	}
	ret = dpni_get_link_state(dpni, CMD_PRI_LOW, priv->token, &state);
	if (ret < 0) {
		DPAA2_PMD_DEBUG("Unable to get link state (%d)", ret);
		return -1;
	}

	/* changing tx burst function to start enqueues */
	dev->tx_pkt_burst = dpaa2_dev_tx;
	dev->data->dev_link.link_status = state.up;

	if (state.up)
		DPAA2_PMD_INFO("Port %d Link is Up", dev->data->port_id);
	else
		DPAA2_PMD_INFO("Port %d Link is Down", dev->data->port_id);
	return ret;
}

/**
 * Toggle the DPNI to disable, if not already disabled.
 * This is not strictly PHY up/down - it is more of logical toggling.
 */
static int
dpaa2_dev_set_link_down(struct rte_eth_dev *dev)
{
	int ret = -EINVAL;
	struct dpaa2_dev_priv *priv;
	struct fsl_mc_io *dpni;
	int dpni_enabled = 0;
	int retries = 10;

	PMD_INIT_FUNC_TRACE();

	priv = dev->data->dev_private;
	dpni = (struct fsl_mc_io *)priv->hw;

	if (dpni == NULL) {
		DPAA2_PMD_ERR("Device has not yet been configured");
		return ret;
	}

	/* changing tx burst function to avoid any more enqueues */
	dev->tx_pkt_burst = dummy_dev_tx;

	/* Loop while dpni_disable() attempts to drain the egress FQs
	 * and confirm them back to us.
	 */
	do {
		ret = dpni_disable(dpni, 0, priv->token);
		if (ret) {
			DPAA2_PMD_ERR("dpni disable failed (%d)", ret);
			return ret;
		}
		ret = dpni_is_enabled(dpni, 0, priv->token, &dpni_enabled);
		if (ret) {
			DPAA2_PMD_ERR("dpni enable check failed (%d)", ret);
			return ret;
		}
		if (dpni_enabled)
			/* Allow the MC some slack */
			rte_delay_us(100 * 1000);
	} while (dpni_enabled && --retries);

	if (!retries) {
		DPAA2_PMD_WARN("Retry count exceeded disabling dpni");
		/* todo: we may have to manually cleanup queues. */
	} else {
		DPAA2_PMD_INFO("Port %d Link DOWN successful",
			       dev->data->port_id);
	}

	dev->data->dev_link.link_status = 0;

	return ret;
}

static int
dpaa2_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
{
	int ret = -EINVAL;
	struct dpaa2_dev_priv *priv;
	struct fsl_mc_io *dpni;
	struct dpni_link_state state = {0};

	PMD_INIT_FUNC_TRACE();

	priv = dev->data->dev_private;
	dpni = (struct fsl_mc_io *)priv->hw;

	if (dpni == NULL || fc_conf == NULL) {
		DPAA2_PMD_ERR("device not configured");
		return ret;
	}

	ret = dpni_get_link_state(dpni, CMD_PRI_LOW, priv->token, &state);
	if (ret) {
		DPAA2_PMD_ERR("error: dpni_get_link_state %d", ret);
		return ret;
	}

	memset(fc_conf, 0, sizeof(struct rte_eth_fc_conf));
	if (state.options & DPNI_LINK_OPT_PAUSE) {
		/* DPNI_LINK_OPT_PAUSE set
		 *  if ASYM_PAUSE not set,
		 *	RX Side flow control (handle received Pause frame)
		 *	TX side flow control (send Pause frame)
		 *  if ASYM_PAUSE set,
		 *	RX Side flow control (handle received Pause frame)
		 *	No TX side flow control (send Pause frame disabled)
		 */
		if (!(state.options & DPNI_LINK_OPT_ASYM_PAUSE))
			fc_conf->mode = RTE_FC_FULL;
		else
			fc_conf->mode = RTE_FC_RX_PAUSE;
	} else {
		/* DPNI_LINK_OPT_PAUSE not set
		 *  if ASYM_PAUSE set,
		 *	TX side flow control (send Pause frame)
		 *	No RX side flow control (No action on pause frame rx)
		 *  if ASYM_PAUSE not set,
		 *	Flow control disabled
		 */
		if (state.options & DPNI_LINK_OPT_ASYM_PAUSE)
			fc_conf->mode = RTE_FC_TX_PAUSE;
		else
			fc_conf->mode = RTE_FC_NONE;
	}

	return ret;
}

static int
dpaa2_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
{
	int ret = -EINVAL;
	struct dpaa2_dev_priv *priv;
	struct fsl_mc_io *dpni;
	struct dpni_link_state state = {0};
	struct dpni_link_cfg cfg = {0};

	PMD_INIT_FUNC_TRACE();

	priv = dev->data->dev_private;
	dpni = (struct fsl_mc_io *)priv->hw;

	if (dpni == NULL) {
		DPAA2_PMD_ERR("dpni is NULL");
		return ret;
	}

	/* It is necessary to obtain the current state before setting fc_conf
	 * as MC would return error in case rate, autoneg or duplex values are
	 * different.
	 */
	ret = dpni_get_link_state(dpni, CMD_PRI_LOW, priv->token, &state);
	if (ret) {
		DPAA2_PMD_ERR("Unable to get link state (err=%d)", ret);
		return -1;
	}

	/* Disable link before setting configuration */
	dpaa2_dev_set_link_down(dev);

	/* Based on fc_conf, update cfg */
	cfg.rate = state.rate;
	cfg.options = state.options;

	/* update cfg with fc_conf */
	switch (fc_conf->mode) {
	case RTE_FC_FULL:
		/* Full flow control;
		 * OPT_PAUSE set, ASYM_PAUSE not set
		 */
		cfg.options |= DPNI_LINK_OPT_PAUSE;
		cfg.options &= ~DPNI_LINK_OPT_ASYM_PAUSE;
		break;
	case RTE_FC_TX_PAUSE:
		/* Enable RX flow control
		 * OPT_PAUSE not set;
		 * ASYM_PAUSE set
		 */
		cfg.options |= DPNI_LINK_OPT_ASYM_PAUSE;
		cfg.options &= ~DPNI_LINK_OPT_PAUSE;
		break;
	case RTE_FC_RX_PAUSE:
		/* Enable TX Flow control
		 * OPT_PAUSE set
		 * ASYM_PAUSE set
		 */
		cfg.options |= DPNI_LINK_OPT_PAUSE;
		cfg.options |= DPNI_LINK_OPT_ASYM_PAUSE;
		break;
	case RTE_FC_NONE:
		/* Disable Flow control
		 * OPT_PAUSE not set
		 * ASYM_PAUSE not set
		 */
		cfg.options &= ~DPNI_LINK_OPT_PAUSE;
		cfg.options &= ~DPNI_LINK_OPT_ASYM_PAUSE;
		break;
	default:
		DPAA2_PMD_ERR("Incorrect Flow control flag (%d)",
			      fc_conf->mode);
		return -1;
	}

	ret = dpni_set_link_cfg(dpni, CMD_PRI_LOW, priv->token, &cfg);
	if (ret)
		DPAA2_PMD_ERR("Unable to set Link configuration (err=%d)",
			      ret);

	/* Enable link */
	dpaa2_dev_set_link_up(dev);

	return ret;
}
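
/*
 * Illustrative sketch (generic ethdev API): the switch above maps the four
 * standard modes onto the PAUSE/ASYM_PAUSE option bits, so requesting full
 * flow control from an application looks like:
 *
 *	struct rte_eth_fc_conf fc = { .mode = RTE_FC_FULL };
 *	ret = rte_eth_dev_flow_ctrl_set(port_id, &fc);
 */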

static int
dpaa2_dev_rss_hash_update(struct rte_eth_dev *dev,
			  struct rte_eth_rss_conf *rss_conf)
{
	struct rte_eth_dev_data *data = dev->data;
	struct rte_eth_conf *eth_conf = &data->dev_conf;
	int ret;

	PMD_INIT_FUNC_TRACE();

	if (rss_conf->rss_hf) {
		ret = dpaa2_setup_flow_dist(dev, rss_conf->rss_hf);
		if (ret) {
			DPAA2_PMD_ERR("Unable to set flow dist");
			return ret;
		}
	} else {
		ret = dpaa2_remove_flow_dist(dev, 0);
		if (ret) {
			DPAA2_PMD_ERR("Unable to remove flow dist");
			return ret;
		}
	}
	eth_conf->rx_adv_conf.rss_conf.rss_hf = rss_conf->rss_hf;
	return 0;
}

static int
dpaa2_dev_rss_hash_conf_get(struct rte_eth_dev *dev,
			    struct rte_eth_rss_conf *rss_conf)
{
	struct rte_eth_dev_data *data = dev->data;
	struct rte_eth_conf *eth_conf = &data->dev_conf;

	/* dpaa2 does not support rss_key, so length should be 0 */
	rss_conf->rss_key_len = 0;
	rss_conf->rss_hf = eth_conf->rx_adv_conf.rss_conf.rss_hf;
	return 0;
}
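
/*
 * Illustrative sketch (generic ethdev API): since no RSS key is supported,
 * only the rss_hf field matters when reconfiguring hashing at runtime.
 *
 *	struct rte_eth_rss_conf rss = { .rss_hf = ETH_RSS_IP | ETH_RSS_UDP };
 *	ret = rte_eth_dev_rss_hash_update(port_id, &rss);
 */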

int dpaa2_eth_eventq_attach(const struct rte_eth_dev *dev,
		int eth_rx_queue_id,
		uint16_t dpcon_id,
		const struct rte_event_eth_rx_adapter_queue_conf *queue_conf)
{
	struct dpaa2_dev_priv *eth_priv = dev->data->dev_private;
	struct fsl_mc_io *dpni = (struct fsl_mc_io *)eth_priv->hw;
	struct dpaa2_queue *dpaa2_ethq = eth_priv->rx_vq[eth_rx_queue_id];
	uint8_t flow_id = dpaa2_ethq->flow_id;
	struct dpni_queue cfg;
	uint8_t options;
	int ret;

	if (queue_conf->ev.sched_type == RTE_SCHED_TYPE_PARALLEL)
		dpaa2_ethq->cb = dpaa2_dev_process_parallel_event;
	else if (queue_conf->ev.sched_type == RTE_SCHED_TYPE_ATOMIC)
		dpaa2_ethq->cb = dpaa2_dev_process_atomic_event;
	else if (queue_conf->ev.sched_type == RTE_SCHED_TYPE_ORDERED)
		dpaa2_ethq->cb = dpaa2_dev_process_ordered_event;
	else
		return -EINVAL;

	memset(&cfg, 0, sizeof(struct dpni_queue));
	options = DPNI_QUEUE_OPT_DEST;
	cfg.destination.type = DPNI_DEST_DPCON;
	cfg.destination.id = dpcon_id;
	cfg.destination.priority = queue_conf->ev.priority;

	if (queue_conf->ev.sched_type == RTE_SCHED_TYPE_ATOMIC) {
		options |= DPNI_QUEUE_OPT_HOLD_ACTIVE;
		cfg.destination.hold_active = 1;
	}

	if (queue_conf->ev.sched_type == RTE_SCHED_TYPE_ORDERED &&
			!eth_priv->en_ordered) {
		struct opr_cfg ocfg;

		/* Restoration window size = 256 frames */
		ocfg.oprrws = 3;
		/* Restoration window size = 512 frames for LX2 */
		if (dpaa2_svr_family == SVR_LX2160A)
			ocfg.oprrws = 4;
		/* Auto advance NESN window enabled */
		ocfg.oa = 1;
		/* Late arrival window size disabled */
		ocfg.olws = 0;
		/* ORL resource exhaustion advance NESN disabled */
		ocfg.oeane = 0;
		/* Loose ordering enabled */
		ocfg.oloe = 1;
		eth_priv->en_loose_ordered = 1;
		/* Strict ordering enabled if explicitly set */
		if (getenv("DPAA2_STRICT_ORDERING_ENABLE")) {
			ocfg.oloe = 0;
			eth_priv->en_loose_ordered = 0;
		}

		ret = dpni_set_opr(dpni, CMD_PRI_LOW, eth_priv->token,
				   dpaa2_ethq->tc_index, flow_id,
				   OPR_OPT_CREATE, &ocfg);
		if (ret) {
			DPAA2_PMD_ERR("Error setting opr: ret: %d\n", ret);
			return ret;
		}

		eth_priv->en_ordered = 1;
	}

	options |= DPNI_QUEUE_OPT_USER_CTX;
	cfg.user_context = (size_t)(dpaa2_ethq);

	ret = dpni_set_queue(dpni, CMD_PRI_LOW, eth_priv->token, DPNI_QUEUE_RX,
			     dpaa2_ethq->tc_index, flow_id, options, &cfg);
	if (ret) {
		DPAA2_PMD_ERR("Error in dpni_set_queue: ret: %d", ret);
		return ret;
	}

	memcpy(&dpaa2_ethq->ev, &queue_conf->ev, sizeof(struct rte_event));

	return 0;
}

int dpaa2_eth_eventq_detach(const struct rte_eth_dev *dev,
		int eth_rx_queue_id)
{
	struct dpaa2_dev_priv *eth_priv = dev->data->dev_private;
	struct fsl_mc_io *dpni = (struct fsl_mc_io *)eth_priv->hw;
	struct dpaa2_queue *dpaa2_ethq = eth_priv->rx_vq[eth_rx_queue_id];
	uint8_t flow_id = dpaa2_ethq->flow_id;
	struct dpni_queue cfg;
	uint8_t options;
	int ret;

	memset(&cfg, 0, sizeof(struct dpni_queue));
	options = DPNI_QUEUE_OPT_DEST;
	cfg.destination.type = DPNI_DEST_NONE;

	ret = dpni_set_queue(dpni, CMD_PRI_LOW, eth_priv->token, DPNI_QUEUE_RX,
			     dpaa2_ethq->tc_index, flow_id, options, &cfg);
	if (ret)
		DPAA2_PMD_ERR("Error in dpni_set_queue: ret: %d", ret);

	return ret;
}

static inline int
dpaa2_dev_verify_filter_ops(enum rte_filter_op filter_op)
{
	unsigned int i;

	for (i = 0; i < RTE_DIM(dpaa2_supported_filter_ops); i++) {
		if (dpaa2_supported_filter_ops[i] == filter_op)
			return 0;
	}
	return -ENOTSUP;
}

static int
dpaa2_dev_flow_ctrl(struct rte_eth_dev *dev,
		    enum rte_filter_type filter_type,
		    enum rte_filter_op filter_op,
		    void *arg)
{
	int ret = 0;

	if (!dev)
		return -ENODEV;

	switch (filter_type) {
	case RTE_ETH_FILTER_GENERIC:
		if (dpaa2_dev_verify_filter_ops(filter_op) < 0) {
			ret = -ENOTSUP;
			break;
		}
		*(const void **)arg = &dpaa2_flow_ops;
		dpaa2_filter_type |= filter_type;
		break;
	default:
		RTE_LOG(ERR, PMD, "Filter type (%d) not supported",
			filter_type);
		ret = -ENOTSUP;
		break;
	}
	return ret;
}

static struct eth_dev_ops dpaa2_ethdev_ops = {
	.dev_configure            = dpaa2_eth_dev_configure,
	.dev_start                = dpaa2_dev_start,
	.dev_stop                 = dpaa2_dev_stop,
	.dev_close                = dpaa2_dev_close,
	.promiscuous_enable       = dpaa2_dev_promiscuous_enable,
	.promiscuous_disable      = dpaa2_dev_promiscuous_disable,
	.allmulticast_enable      = dpaa2_dev_allmulticast_enable,
	.allmulticast_disable     = dpaa2_dev_allmulticast_disable,
	.dev_set_link_up          = dpaa2_dev_set_link_up,
	.dev_set_link_down        = dpaa2_dev_set_link_down,
	.link_update              = dpaa2_dev_link_update,
	.stats_get                = dpaa2_dev_stats_get,
	.xstats_get               = dpaa2_dev_xstats_get,
	.xstats_get_by_id         = dpaa2_xstats_get_by_id,
	.xstats_get_names_by_id   = dpaa2_xstats_get_names_by_id,
	.xstats_get_names         = dpaa2_xstats_get_names,
	.stats_reset              = dpaa2_dev_stats_reset,
	.xstats_reset             = dpaa2_dev_stats_reset,
	.fw_version_get           = dpaa2_fw_version_get,
	.dev_infos_get            = dpaa2_dev_info_get,
	.dev_supported_ptypes_get = dpaa2_supported_ptypes_get,
	.mtu_set                  = dpaa2_dev_mtu_set,
	.vlan_filter_set          = dpaa2_vlan_filter_set,
	.vlan_offload_set         = dpaa2_vlan_offload_set,
	.vlan_tpid_set            = dpaa2_vlan_tpid_set,
	.rx_queue_setup           = dpaa2_dev_rx_queue_setup,
	.rx_queue_release         = dpaa2_dev_rx_queue_release,
	.tx_queue_setup           = dpaa2_dev_tx_queue_setup,
	.tx_queue_release         = dpaa2_dev_tx_queue_release,
	.rx_queue_count           = dpaa2_dev_rx_queue_count,
	.flow_ctrl_get            = dpaa2_flow_ctrl_get,
	.flow_ctrl_set            = dpaa2_flow_ctrl_set,
	.mac_addr_add             = dpaa2_dev_add_mac_addr,
	.mac_addr_remove          = dpaa2_dev_remove_mac_addr,
	.mac_addr_set             = dpaa2_dev_set_mac_addr,
	.rss_hash_update          = dpaa2_dev_rss_hash_update,
	.rss_hash_conf_get        = dpaa2_dev_rss_hash_conf_get,
	.filter_ctrl              = dpaa2_dev_flow_ctrl,
};

/* Populate the mac address from physically available (u-boot/firmware) and/or
 * one set by higher layers like MC (restool) etc.
 * Returns the table of MAC entries (multiple entries)
 */
static int
populate_mac_addr(struct fsl_mc_io *dpni_dev, struct dpaa2_dev_priv *priv,
		  struct ether_addr *mac_entry)
{
	int ret;
	struct ether_addr phy_mac, prime_mac;

	memset(&phy_mac, 0, sizeof(struct ether_addr));
	memset(&prime_mac, 0, sizeof(struct ether_addr));

	/* Get the physical device MAC address */
	ret = dpni_get_port_mac_addr(dpni_dev, CMD_PRI_LOW, priv->token,
				     phy_mac.addr_bytes);
	if (ret) {
		DPAA2_PMD_ERR("DPNI get physical port MAC failed: %d", ret);
		goto cleanup;
	}

	ret = dpni_get_primary_mac_addr(dpni_dev, CMD_PRI_LOW, priv->token,
					prime_mac.addr_bytes);
	if (ret) {
		DPAA2_PMD_ERR("DPNI get Prime port MAC failed: %d", ret);
		goto cleanup;
	}

	/* Now that both MAC have been obtained, do:
	 *  if not_empty_mac(phy) && phy != Prime, overwrite prime with Phy
	 *  If empty_mac(phy), return prime.
	 *  if both are empty, create random MAC, set as prime and return
	 */
	if (!is_zero_ether_addr(&phy_mac)) {
		/* If the addresses are not same, overwrite prime */
		if (!is_same_ether_addr(&phy_mac, &prime_mac)) {
			ret = dpni_set_primary_mac_addr(dpni_dev, CMD_PRI_LOW,
							priv->token,
							phy_mac.addr_bytes);
			if (ret) {
				DPAA2_PMD_ERR("Unable to set MAC Address: %d",
					      ret);
				goto cleanup;
			}
			memcpy(&prime_mac, &phy_mac,
				sizeof(struct ether_addr));
		}
	} else if (is_zero_ether_addr(&prime_mac)) {
		/* In case phys and prime, both are zero, create random MAC */
		eth_random_addr(prime_mac.addr_bytes);
		ret = dpni_set_primary_mac_addr(dpni_dev, CMD_PRI_LOW,
						priv->token,
						prime_mac.addr_bytes);
		if (ret) {
			DPAA2_PMD_ERR("Unable to set MAC Address: %d", ret);
			goto cleanup;
		}
	}

	/* prime_mac the final MAC address */
	memcpy(mac_entry, &prime_mac, sizeof(struct ether_addr));
	return 0;

cleanup:
	return -1;
}

static int
check_devargs_handler(__rte_unused const char *key, const char *value,
		      __rte_unused void *opaque)
{
	if (strcmp(value, "1"))
		return -1;

	return 0;
}

static int
dpaa2_get_devargs(struct rte_devargs *devargs, const char *key)
{
	struct rte_kvargs *kvlist;

	if (!devargs)
		return 0;

	kvlist = rte_kvargs_parse(devargs->args, NULL);
	if (!kvlist)
		return 0;

	if (!rte_kvargs_count(kvlist, key)) {
		rte_kvargs_free(kvlist);
		return 0;
	}

	if (rte_kvargs_process(kvlist, key,
			       check_devargs_handler, NULL) < 0) {
		rte_kvargs_free(kvlist);
		return 0;
	}
	rte_kvargs_free(kvlist);

	return 1;
}
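
/*
 * Illustrative sketch: the parser above accepts only the literal value "1",
 * so loopback mode is selected with a kvarg on the fslmc device from the EAL
 * command line, e.g. (the exact bus/device string is environment specific):
 *
 *	testpmd -w fslmc:dpni.1,drv_loopback=1 -- -i
 */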

static int
dpaa2_dev_init(struct rte_eth_dev *eth_dev)
{
	struct rte_device *dev = eth_dev->device;
	struct rte_dpaa2_device *dpaa2_dev;
	struct fsl_mc_io *dpni_dev;
	struct dpni_attr attr;
	struct dpaa2_dev_priv *priv = eth_dev->data->dev_private;
	struct dpni_buffer_layout layout;
	int ret, hw_id, i;

	PMD_INIT_FUNC_TRACE();

	/* For secondary processes, the primary has done all the work */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
		/* In case of secondary, only burst and ops API need to be
		 * plugged.
		 */
		eth_dev->dev_ops = &dpaa2_ethdev_ops;
		if (dpaa2_get_devargs(dev->devargs, DRIVER_LOOPBACK_MODE))
			eth_dev->rx_pkt_burst = dpaa2_dev_loopback_rx;
		else
			eth_dev->rx_pkt_burst = dpaa2_dev_prefetch_rx;
		eth_dev->tx_pkt_burst = dpaa2_dev_tx;
		return 0;
	}

	dpaa2_dev = container_of(dev, struct rte_dpaa2_device, device);

	hw_id = dpaa2_dev->object_id;

	dpni_dev = rte_malloc(NULL, sizeof(struct fsl_mc_io), 0);
	if (!dpni_dev) {
		DPAA2_PMD_ERR("Memory allocation failed for dpni device");
		return -1;
	}

	dpni_dev->regs = rte_mcp_ptr_list[0];
	ret = dpni_open(dpni_dev, CMD_PRI_LOW, hw_id, &priv->token);
	if (ret) {
		DPAA2_PMD_ERR(
			"Failure in opening dpni@%d with err code %d",
			hw_id, ret);
		rte_free(dpni_dev);
		return -1;
	}

	/* Clean the device first */
	ret = dpni_reset(dpni_dev, CMD_PRI_LOW, priv->token);
	if (ret) {
		DPAA2_PMD_ERR("Failure cleaning dpni@%d with err code %d",
			      hw_id, ret);
		goto init_err;
	}

	ret = dpni_get_attributes(dpni_dev, CMD_PRI_LOW, priv->token, &attr);
	if (ret) {
		DPAA2_PMD_ERR(
			"Failure in get dpni@%d attribute, err code %d",
			hw_id, ret);
		goto init_err;
	}

	priv->num_rx_tc = attr.num_rx_tcs;

	for (i = 0; i < attr.num_rx_tcs; i++)
		priv->nb_rx_queues += attr.num_queues;

	/* Using number of TX queues as number of TX TCs */
	priv->nb_tx_queues = attr.num_tx_tcs;

	DPAA2_PMD_DEBUG("RX-TC= %d, nb_rx_queues= %d, nb_tx_queues=%d",
			priv->num_rx_tc, priv->nb_rx_queues,
			priv->nb_tx_queues);

	priv->hw = dpni_dev;
	priv->hw_id = hw_id;
	priv->options = attr.options;
	priv->max_mac_filters = attr.mac_filter_entries;
	priv->max_vlan_filters = attr.vlan_filter_entries;

	/* Allocate memory for hardware structure for queues */
	ret = dpaa2_alloc_rx_tx_queues(eth_dev);
	if (ret) {
		DPAA2_PMD_ERR("Queue allocation Failed");
		goto init_err;
	}

	/* Allocate memory for storing MAC addresses.
	 * Table of mac_filter_entries size is allocated so that RTE ether lib
	 * can add MAC entries when rte_eth_dev_mac_addr_add is called.
	 */
	eth_dev->data->mac_addrs = rte_zmalloc("dpni",
		ETHER_ADDR_LEN * attr.mac_filter_entries, 0);
	if (eth_dev->data->mac_addrs == NULL) {
		DPAA2_PMD_ERR(
		   "Failed to allocate %d bytes needed to store MAC addresses",
		   ETHER_ADDR_LEN * attr.mac_filter_entries);
		ret = -ENOMEM;
		goto init_err;
	}

	ret = populate_mac_addr(dpni_dev, priv, &eth_dev->data->mac_addrs[0]);
	if (ret) {
		DPAA2_PMD_ERR("Unable to fetch MAC Address for device");
		rte_free(eth_dev->data->mac_addrs);
		eth_dev->data->mac_addrs = NULL;
		goto init_err;
	}

	/* ... tx buffer layout ... */
	memset(&layout, 0, sizeof(struct dpni_buffer_layout));
	layout.options = DPNI_BUF_LAYOUT_OPT_FRAME_STATUS;
	layout.pass_frame_status = 1;
	ret = dpni_set_buffer_layout(dpni_dev, CMD_PRI_LOW, priv->token,
				     DPNI_QUEUE_TX, &layout);
	if (ret) {
		DPAA2_PMD_ERR("Error (%d) in setting tx buffer layout", ret);
		goto init_err;
	}

	/* ... tx-conf and error buffer layout ... */
	memset(&layout, 0, sizeof(struct dpni_buffer_layout));
	layout.options = DPNI_BUF_LAYOUT_OPT_FRAME_STATUS;
	layout.pass_frame_status = 1;
	ret = dpni_set_buffer_layout(dpni_dev, CMD_PRI_LOW, priv->token,
				     DPNI_QUEUE_TX_CONFIRM, &layout);
	if (ret) {
		DPAA2_PMD_ERR("Error (%d) in setting tx-conf buffer layout",
			      ret);
		goto init_err;
	}

	eth_dev->dev_ops = &dpaa2_ethdev_ops;

	if (dpaa2_get_devargs(dev->devargs, DRIVER_LOOPBACK_MODE)) {
		eth_dev->rx_pkt_burst = dpaa2_dev_loopback_rx;
		DPAA2_PMD_INFO("Loopback mode");
	} else {
		eth_dev->rx_pkt_burst = dpaa2_dev_prefetch_rx;
	}
	eth_dev->tx_pkt_burst = dpaa2_dev_tx;

	/* Init fields w.r.t. classification */
	memset(&priv->extract.qos_key_cfg, 0, sizeof(struct dpkg_profile_cfg));
	priv->extract.qos_extract_param = (size_t)rte_malloc(NULL, 256, 64);
	if (!priv->extract.qos_extract_param) {
		DPAA2_PMD_ERR(" Error(%d) in allocation resources for flow "
			      " classification ", ret);
		goto init_err;
	}
	for (i = 0; i < MAX_TCS; i++) {
		memset(&priv->extract.fs_key_cfg[i], 0,
			sizeof(struct dpkg_profile_cfg));
		priv->extract.fs_extract_param[i] =
			(size_t)rte_malloc(NULL, 256, 64);
		if (!priv->extract.fs_extract_param[i]) {
			DPAA2_PMD_ERR(" Error(%d) in allocation resources for flow classification",
				      ret);
			goto init_err;
		}
	}

	RTE_LOG(INFO, PMD, "%s: netdev created\n", eth_dev->data->name);
	return 0;
init_err:
	dpaa2_dev_uninit(eth_dev);
	return ret;
}

static int
dpaa2_dev_uninit(struct rte_eth_dev *eth_dev)
{
	struct dpaa2_dev_priv *priv = eth_dev->data->dev_private;
	struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
	int i, ret;

	PMD_INIT_FUNC_TRACE();

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	if (!dpni) {
		DPAA2_PMD_WARN("Already closed or not started");
		return -1;
	}

	dpaa2_dev_close(eth_dev);

	dpaa2_free_rx_tx_queues(eth_dev);

	/* Close the device at underlying layer */
	ret = dpni_close(dpni, CMD_PRI_LOW, priv->token);
	if (ret) {
		DPAA2_PMD_ERR(
			"Failure closing dpni device with err code %d",
			ret);
	}

	/* Free the allocated memory for ethernet private data and dpni */
	priv->hw = NULL;
	rte_free(dpni);

	for (i = 0; i < MAX_TCS; i++) {
		if (priv->extract.fs_extract_param[i])
			rte_free((void *)
				(size_t)priv->extract.fs_extract_param[i]);
	}

	if (priv->extract.qos_extract_param)
		rte_free((void *)(size_t)priv->extract.qos_extract_param);

	eth_dev->dev_ops = NULL;
	eth_dev->rx_pkt_burst = NULL;
	eth_dev->tx_pkt_burst = NULL;

	DPAA2_PMD_INFO("%s: netdev deleted", eth_dev->data->name);
	return 0;
}

static int
rte_dpaa2_probe(struct rte_dpaa2_driver *dpaa2_drv,
		struct rte_dpaa2_device *dpaa2_dev)
{
	struct rte_eth_dev *eth_dev;
	int diag;

	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
		eth_dev = rte_eth_dev_allocate(dpaa2_dev->device.name);
		if (!eth_dev)
			return -ENODEV;
		eth_dev->data->dev_private = rte_zmalloc(
						"ethdev private structure",
						sizeof(struct dpaa2_dev_priv),
						RTE_CACHE_LINE_SIZE);
		if (eth_dev->data->dev_private == NULL) {
			DPAA2_PMD_CRIT(
				"Unable to allocate memory for private data");
			rte_eth_dev_release_port(eth_dev);
			return -ENOMEM;
		}
	} else {
		eth_dev = rte_eth_dev_attach_secondary(dpaa2_dev->device.name);
		if (!eth_dev)
			return -ENODEV;
	}

	eth_dev->device = &dpaa2_dev->device;

	dpaa2_dev->eth_dev = eth_dev;
	eth_dev->data->rx_mbuf_alloc_failed = 0;

	if (dpaa2_drv->drv_flags & RTE_DPAA2_DRV_INTR_LSC)
		eth_dev->data->dev_flags |= RTE_ETH_DEV_INTR_LSC;

	/* Invoke PMD device initialization function */
	diag = dpaa2_dev_init(eth_dev);
	if (diag == 0) {
		rte_eth_dev_probing_finish(eth_dev);
		return 0;
	}

	rte_eth_dev_release_port(eth_dev);
	return diag;
}

static int
rte_dpaa2_remove(struct rte_dpaa2_device *dpaa2_dev)
{
	struct rte_eth_dev *eth_dev;

	eth_dev = dpaa2_dev->eth_dev;
	dpaa2_dev_uninit(eth_dev);

	rte_eth_dev_release_port(eth_dev);

	return 0;
}

static struct rte_dpaa2_driver rte_dpaa2_pmd = {
	.drv_flags = RTE_DPAA2_DRV_INTR_LSC | RTE_DPAA2_DRV_IOVA_AS_VA,
	.drv_type = DPAA2_ETH,
	.probe = rte_dpaa2_probe,
	.remove = rte_dpaa2_remove,
};

RTE_PMD_REGISTER_DPAA2(net_dpaa2, rte_dpaa2_pmd);
RTE_PMD_REGISTER_PARAM_STRING(net_dpaa2,
		DRIVER_LOOPBACK_MODE "=<int>");
RTE_INIT(dpaa2_pmd_init_log)
{
	dpaa2_logtype_pmd = rte_log_register("pmd.net.dpaa2");
	if (dpaa2_logtype_pmd >= 0)
		rte_log_set_level(dpaa2_logtype_pmd, RTE_LOG_NOTICE);
}