/*
 * Copyright (c) 2016 QLogic Corporation.
 * All rights reserved.
 * www.qlogic.com
 *
 * See LICENSE.qede_pmd for copyright and licensing details.
 */

#include "qede_rxtx.h"
static bool gro_disable = 1;	/* mod_param */
static inline int qede_alloc_rx_buffer(struct qede_rx_queue *rxq)
{
	struct rte_mbuf *new_mb = NULL;
	struct eth_rx_bd *rx_bd;
	dma_addr_t mapping;
	uint16_t idx = rxq->sw_rx_prod & NUM_RX_BDS(rxq);

	new_mb = rte_mbuf_raw_alloc(rxq->mb_pool);
	if (unlikely(!new_mb)) {
		PMD_RX_LOG(ERR, rxq,
			   "Failed to allocate rx buffer "
			   "sw_rx_prod %u sw_rx_cons %u mp entries %u free %u",
			   idx, rxq->sw_rx_cons & NUM_RX_BDS(rxq),
			   rte_mempool_avail_count(rxq->mb_pool),
			   rte_mempool_in_use_count(rxq->mb_pool));
		return -ENOMEM;
	}
	rxq->sw_rx_ring[idx].mbuf = new_mb;
	rxq->sw_rx_ring[idx].page_offset = 0;
	mapping = rte_mbuf_data_dma_addr_default(new_mb);
	/* Advance PROD and get BD pointer */
	rx_bd = (struct eth_rx_bd *)ecore_chain_produce(&rxq->rx_bd_ring);
	rx_bd->addr.hi = rte_cpu_to_le_32(U64_HI(mapping));
	rx_bd->addr.lo = rte_cpu_to_le_32(U64_LO(mapping));
	rxq->sw_rx_prod++;

	return 0;
}
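/*
 * Editorial sketch, not part of the original driver: a bulk-refill wrapper
 * around qede_alloc_rx_buffer(). The helper name is hypothetical; it stops
 * at the first failed allocation and reports how many BDs were posted, so a
 * caller could ring the producer for the successful part only.
 */
static inline uint16_t
qede_alloc_rx_buffers(struct qede_rx_queue *rxq, uint16_t count)
{
	uint16_t done;

	for (done = 0; done < count; done++)
		if (qede_alloc_rx_buffer(rxq) != 0)
			break;	/* mempool exhausted; post what we have */

	return done;
}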
static void qede_rx_queue_release_mbufs(struct qede_rx_queue *rxq)
{
	uint16_t i;

	if (rxq->sw_rx_ring != NULL) {
		for (i = 0; i < rxq->nb_rx_desc; i++) {
			if (rxq->sw_rx_ring[i].mbuf != NULL) {
				rte_pktmbuf_free(rxq->sw_rx_ring[i].mbuf);
				rxq->sw_rx_ring[i].mbuf = NULL;
			}
		}
	}
}
void qede_rx_queue_release(void *rx_queue)
{
	struct qede_rx_queue *rxq = rx_queue;

	if (rxq != NULL) {
		qede_rx_queue_release_mbufs(rxq);
		rte_free(rxq->sw_rx_ring);
		rxq->sw_rx_ring = NULL;
		rte_free(rxq);
	}
}
static void qede_tx_queue_release_mbufs(struct qede_tx_queue *txq)
{
	uint16_t i;

	PMD_TX_LOG(DEBUG, txq, "releasing %u mbufs\n", txq->nb_tx_desc);

	if (txq->sw_tx_ring) {
		for (i = 0; i < txq->nb_tx_desc; i++) {
			if (txq->sw_tx_ring[i].mbuf) {
				rte_pktmbuf_free(txq->sw_tx_ring[i].mbuf);
				txq->sw_tx_ring[i].mbuf = NULL;
			}
		}
	}
}
int
qede_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
		    uint16_t nb_desc, unsigned int socket_id,
		    const struct rte_eth_rxconf *rx_conf,
		    struct rte_mempool *mp)
{
	struct qede_dev *qdev = dev->data->dev_private;
	struct ecore_dev *edev = &qdev->edev;
	struct rte_eth_dev_data *eth_data = dev->data;
	struct qede_rx_queue *rxq;
	uint16_t pkt_len = (uint16_t)dev->data->dev_conf.rxmode.max_rx_pkt_len;
	uint16_t data_size;
	size_t size;
	int rc;
	int i;

	PMD_INIT_FUNC_TRACE(edev);

	/* Note: Ring size/align is controlled by struct rte_eth_desc_lim */
	if (!rte_is_power_of_2(nb_desc)) {
		DP_ERR(edev, "Ring size %u is not power of 2\n",
		       nb_desc);
		return -EINVAL;
	}

	/* Free memory prior to re-allocation if needed... */
	if (dev->data->rx_queues[queue_idx] != NULL) {
		qede_rx_queue_release(dev->data->rx_queues[queue_idx]);
		dev->data->rx_queues[queue_idx] = NULL;
	}

	/* First allocate the rx queue data structure */
	rxq = rte_zmalloc_socket("qede_rx_queue", sizeof(struct qede_rx_queue),
				 RTE_CACHE_LINE_SIZE, socket_id);
	if (!rxq) {
		DP_ERR(edev, "Unable to allocate memory for rxq on socket %u",
		       socket_id);
		return -ENOMEM;
	}

	rxq->qdev = qdev;
	rxq->mb_pool = mp;
	rxq->nb_rx_desc = nb_desc;
	rxq->queue_id = queue_idx;
	rxq->port_id = dev->data->port_id;

	data_size = (uint16_t)rte_pktmbuf_data_room_size(mp) -
		    RTE_PKTMBUF_HEADROOM;

	if (pkt_len > data_size && !dev->data->scattered_rx) {
		DP_ERR(edev, "MTU %u should not exceed dataroom %u\n",
		       pkt_len, data_size);
		rte_free(rxq);
		return -EINVAL;
	}

	if (dev->data->scattered_rx)
		rxq->rx_buf_size = data_size;
	else
		rxq->rx_buf_size = pkt_len + QEDE_ETH_OVERHEAD;

	qdev->mtu = pkt_len;

	DP_INFO(edev, "MTU = %u ; RX buffer = %u\n",
		qdev->mtu, rxq->rx_buf_size);

	if (pkt_len > ETHER_MAX_LEN) {
		dev->data->dev_conf.rxmode.jumbo_frame = 1;
		DP_NOTICE(edev, false, "jumbo frame enabled\n");
	} else {
		dev->data->dev_conf.rxmode.jumbo_frame = 0;
	}

	/* Allocate the parallel driver ring for Rx buffers */
	size = sizeof(*rxq->sw_rx_ring) * rxq->nb_rx_desc;
	rxq->sw_rx_ring = rte_zmalloc_socket("sw_rx_ring", size,
					     RTE_CACHE_LINE_SIZE, socket_id);
	if (!rxq->sw_rx_ring) {
		DP_NOTICE(edev, false,
			  "Unable to alloc memory for sw_rx_ring on socket %u\n",
			  socket_id);
		rte_free(rxq);
		return -ENOMEM;
	}

	/* Allocate FW Rx ring */
	rc = qdev->ops->common->chain_alloc(edev,
					    ECORE_CHAIN_USE_TO_CONSUME_PRODUCE,
					    ECORE_CHAIN_MODE_NEXT_PTR,
					    ECORE_CHAIN_CNT_TYPE_U16,
					    rxq->nb_rx_desc,
					    sizeof(struct eth_rx_bd),
					    &rxq->rx_bd_ring);
	if (rc != ECORE_SUCCESS) {
		DP_NOTICE(edev, false,
			  "Unable to alloc memory for rxbd ring on socket %u\n",
			  socket_id);
		rte_free(rxq->sw_rx_ring);
		rxq->sw_rx_ring = NULL;
		rte_free(rxq);
		return -ENOMEM;
	}

	/* Allocate FW completion ring */
	rc = qdev->ops->common->chain_alloc(edev,
					    ECORE_CHAIN_USE_TO_CONSUME,
					    ECORE_CHAIN_MODE_PBL,
					    ECORE_CHAIN_CNT_TYPE_U16,
					    rxq->nb_rx_desc,
					    sizeof(union eth_rx_cqe),
					    &rxq->rx_comp_ring);
	if (rc != ECORE_SUCCESS) {
		DP_NOTICE(edev, false,
			  "Unable to alloc memory for cqe ring on socket %u\n",
			  socket_id);
		/* TBD: Freeing RX BD ring */
		rte_free(rxq->sw_rx_ring);
		rxq->sw_rx_ring = NULL;
		rte_free(rxq);
		return -ENOMEM;
	}

	/* Allocate buffers for the Rx ring */
	for (i = 0; i < rxq->nb_rx_desc; i++) {
		rc = qede_alloc_rx_buffer(rxq);
		if (rc) {
			DP_NOTICE(edev, false,
				  "RX buffer allocation failed at idx=%d\n", i);
			goto err;
		}
	}

	dev->data->rx_queues[queue_idx] = rxq;

	DP_INFO(edev, "rxq %d num_desc %u rx_buf_size=%u socket %u\n",
		queue_idx, nb_desc, rxq->rx_buf_size, socket_id);

	return 0;

err:
	qede_rx_queue_release(rxq);
	return -ENOMEM;
}
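/*
 * Editorial sketch, not part of the original driver: how an application
 * reaches the setup path above through the generic ethdev API. The queue
 * id, ring size and mempool are hypothetical; nb_desc must be a power of 2
 * and max_rx_pkt_len must fit the pool's data room unless scattered Rx is
 * enabled, or the setup above fails with -EINVAL.
 */
static __rte_unused int
qede_example_rxq_setup(uint8_t port_id, struct rte_mempool *mp)
{
	/* A NULL rte_eth_rxconf selects the driver defaults. */
	return rte_eth_rx_queue_setup(port_id, 0, 1024,
				      rte_eth_dev_socket_id(port_id),
				      NULL, mp);
}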
void qede_tx_queue_release(void *tx_queue)
{
	struct qede_tx_queue *txq = tx_queue;

	if (txq != NULL) {
		qede_tx_queue_release_mbufs(txq);
		if (txq->sw_tx_ring) {
			rte_free(txq->sw_tx_ring);
			txq->sw_tx_ring = NULL;
		}
		rte_free(txq);
	}
}
int
qede_tx_queue_setup(struct rte_eth_dev *dev,
		    uint16_t queue_idx,
		    uint16_t nb_desc,
		    unsigned int socket_id,
		    const struct rte_eth_txconf *tx_conf)
{
	struct qede_dev *qdev = dev->data->dev_private;
	struct ecore_dev *edev = &qdev->edev;
	struct qede_tx_queue *txq;
	int rc;

	PMD_INIT_FUNC_TRACE(edev);

	if (!rte_is_power_of_2(nb_desc)) {
		DP_ERR(edev, "Ring size %u is not power of 2\n",
		       nb_desc);
		return -EINVAL;
	}

	/* Free memory prior to re-allocation if needed... */
	if (dev->data->tx_queues[queue_idx] != NULL) {
		qede_tx_queue_release(dev->data->tx_queues[queue_idx]);
		dev->data->tx_queues[queue_idx] = NULL;
	}

	txq = rte_zmalloc_socket("qede_tx_queue", sizeof(struct qede_tx_queue),
				 RTE_CACHE_LINE_SIZE, socket_id);
	if (txq == NULL) {
		DP_ERR(edev,
		       "Unable to allocate memory for txq on socket %u",
		       socket_id);
		return -ENOMEM;
	}

	txq->nb_tx_desc = nb_desc;
	txq->qdev = qdev;
	txq->port_id = dev->data->port_id;

	rc = qdev->ops->common->chain_alloc(edev,
					    ECORE_CHAIN_USE_TO_CONSUME_PRODUCE,
					    ECORE_CHAIN_MODE_PBL,
					    ECORE_CHAIN_CNT_TYPE_U16,
					    txq->nb_tx_desc,
					    sizeof(union eth_tx_bd_types),
					    &txq->tx_pbl);
	if (rc != ECORE_SUCCESS) {
		DP_ERR(edev,
		       "Unable to allocate memory for txbd ring on socket %u",
		       socket_id);
		qede_tx_queue_release(txq);
		return -ENOMEM;
	}

	/* Allocate software ring */
	txq->sw_tx_ring = rte_zmalloc_socket("txq->sw_tx_ring",
					     (sizeof(struct qede_tx_entry) *
					      txq->nb_tx_desc),
					     RTE_CACHE_LINE_SIZE, socket_id);
	if (!txq->sw_tx_ring) {
		DP_ERR(edev,
		       "Unable to allocate memory for sw_tx_ring on socket %u",
		       socket_id);
		qede_tx_queue_release(txq);
		return -ENOMEM;
	}

	txq->queue_id = queue_idx;

	txq->nb_tx_avail = txq->nb_tx_desc;

	txq->tx_free_thresh =
	    tx_conf->tx_free_thresh ? tx_conf->tx_free_thresh :
	    (txq->nb_tx_desc - QEDE_DEFAULT_TX_FREE_THRESH);

	dev->data->tx_queues[queue_idx] = txq;

	DP_INFO(edev,
		"txq %u num_desc %u tx_free_thresh %u socket %u\n",
		queue_idx, nb_desc, txq->tx_free_thresh, socket_id);

	return 0;
}
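/*
 * Editorial note: with the default above, a queue created with nb_desc =
 * 512 and tx_conf->tx_free_thresh = 0 ends up with tx_free_thresh =
 * 512 - QEDE_DEFAULT_TX_FREE_THRESH; qede_xmit_pkts() only polls for
 * completions once fewer than tx_free_thresh BDs remain available.
 */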
/* This function inits fp content and resets the SB, RXQ and TXQ arrays */
static void qede_init_fp(struct qede_dev *qdev)
{
	struct qede_fastpath *fp;
	uint8_t i, rss_id, tc;
	int fp_rx = qdev->fp_num_rx, rxq = 0, txq = 0;

	memset((void *)qdev->fp_array, 0, (QEDE_QUEUE_CNT(qdev) *
					   sizeof(*qdev->fp_array)));
	memset((void *)qdev->sb_array, 0, (QEDE_QUEUE_CNT(qdev) *
					   sizeof(*qdev->sb_array)));
	for_each_queue(i) {
		fp = &qdev->fp_array[i];
		if (fp_rx) {
			fp->type = QEDE_FASTPATH_RX;
			fp_rx--;
		} else {
			fp->type = QEDE_FASTPATH_TX;
		}
		fp->qdev = qdev;
		fp->id = i;
		fp->sb_info = &qdev->sb_array[i];
		snprintf(fp->name, sizeof(fp->name), "%s-fp-%d", "qdev", i);
	}

	qdev->gro_disable = gro_disable;
}
void qede_free_fp_arrays(struct qede_dev *qdev)
{
	/* It assumes qede_free_mem_load() is called before */
	if (qdev->fp_array != NULL) {
		rte_free(qdev->fp_array);
		qdev->fp_array = NULL;
	}

	if (qdev->sb_array != NULL) {
		rte_free(qdev->sb_array);
		qdev->sb_array = NULL;
	}
}
int qede_alloc_fp_array(struct qede_dev *qdev)
{
	struct qede_fastpath *fp;
	struct ecore_dev *edev = &qdev->edev;

	qdev->fp_array = rte_calloc("fp", QEDE_QUEUE_CNT(qdev),
				    sizeof(*qdev->fp_array),
				    RTE_CACHE_LINE_SIZE);

	if (!qdev->fp_array) {
		DP_ERR(edev, "fp array allocation failed\n");
		return -ENOMEM;
	}

	qdev->sb_array = rte_calloc("sb", QEDE_QUEUE_CNT(qdev),
				    sizeof(*qdev->sb_array),
				    RTE_CACHE_LINE_SIZE);

	if (!qdev->sb_array) {
		DP_ERR(edev, "sb array allocation failed\n");
		rte_free(qdev->fp_array);
		return -ENOMEM;
	}

	return 0;
}
/* This function allocates fast-path status block memory */
static int
qede_alloc_mem_sb(struct qede_dev *qdev, struct ecore_sb_info *sb_info,
		  uint16_t sb_id)
{
	struct ecore_dev *edev = &qdev->edev;
	struct status_block *sb_virt;
	dma_addr_t sb_phys;
	int rc;

	sb_virt = OSAL_DMA_ALLOC_COHERENT(edev, &sb_phys, sizeof(*sb_virt));

	if (!sb_virt) {
		DP_ERR(edev, "Status block allocation failed\n");
		return -ENOMEM;
	}

	rc = qdev->ops->common->sb_init(edev, sb_info,
					sb_virt, sb_phys, sb_id,
					QED_SB_TYPE_L2_QUEUE);
	if (rc) {
		DP_ERR(edev, "Status block initialization failed\n");
		/* TBD: No dma_free_coherent possible */
		return rc;
	}

	return 0;
}
int qede_alloc_fp_resc(struct qede_dev *qdev)
{
	struct ecore_dev *edev = &qdev->edev;
	struct qede_fastpath *fp;
	uint32_t num_sbs;
	int rc, i;

	if (IS_VF(edev))
		ecore_vf_get_num_sbs(ECORE_LEADING_HWFN(edev), &num_sbs);
	else
		num_sbs = (ecore_cxt_get_proto_cid_count
			  (ECORE_LEADING_HWFN(edev), PROTOCOLID_ETH, NULL)) / 2;

	if (num_sbs == 0) {
		DP_ERR(edev, "No status blocks available\n");
		return -EINVAL;
	}

	if (qdev->fp_array)
		qede_free_fp_arrays(qdev);

	rc = qede_alloc_fp_array(qdev);
	if (rc != 0)
		return rc;

	qede_init_fp(qdev);

	for (i = 0; i < QEDE_QUEUE_CNT(qdev); i++) {
		fp = &qdev->fp_array[i];
		if (qede_alloc_mem_sb(qdev, fp->sb_info, i % num_sbs)) {
			qede_free_fp_arrays(qdev);
			return -ENOMEM;
		}
	}

	return 0;
}
void qede_dealloc_fp_resc(struct rte_eth_dev *eth_dev)
{
	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);

	qede_free_mem_load(eth_dev);
	qede_free_fp_arrays(qdev);
}
static inline void
qede_update_rx_prod(struct qede_dev *edev, struct qede_rx_queue *rxq)
{
	uint16_t bd_prod = ecore_chain_get_prod_idx(&rxq->rx_bd_ring);
	uint16_t cqe_prod = ecore_chain_get_prod_idx(&rxq->rx_comp_ring);
	struct eth_rx_prod_data rx_prods = { 0 };

	/* Update producers */
	rx_prods.bd_prod = rte_cpu_to_le_16(bd_prod);
	rx_prods.cqe_prod = rte_cpu_to_le_16(cqe_prod);

	/* Make sure that the BD and SGE data is updated before updating the
	 * producers since FW might read the BD/SGE right after the producer
	 * is updated.
	 */
	rte_wmb();

	internal_ram_wr(rxq->hw_rxq_prod_addr, sizeof(rx_prods),
			(uint32_t *)&rx_prods);

	/* mmiowb is needed to synchronize doorbell writes from more than one
	 * processor. It guarantees that the write arrives to the device before
	 * the napi lock is released and another qede_poll is called (possibly
	 * on another CPU). Without this barrier, the next doorbell can bypass
	 * this doorbell. This is applicable to IA64/Altix systems.
	 */
	rte_wmb();

	PMD_RX_LOG(DEBUG, rxq, "bd_prod %u cqe_prod %u\n", bd_prod, cqe_prod);
}
static inline uint32_t
qede_rxfh_indir_default(uint32_t index, uint32_t n_rx_rings)
{
	return index % n_rx_rings;
}
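/*
 * Editorial example: qede_rxfh_indir_default() spreads the RSS indirection
 * table round-robin over the Rx rings; with n_rx_rings = 4 the entries
 * become 0, 1, 2, 3, 0, 1, 2, 3, ...
 */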
static void qede_prandom_bytes(uint32_t *buff, size_t bytes)
{
	unsigned int i;

	srand((unsigned int)time(NULL));

	for (i = 0; i < ECORE_RSS_KEY_SIZE; i++)
		buff[i] = rand();
}
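/*
 * Editorial note: rand() is acceptable here because the RSS key only has
 * to spread flows, not resist prediction. A sketch of the same fill with
 * DPDK's generator (assuming rte_random.h is available in this tree):
 *
 *	for (i = 0; i < ECORE_RSS_KEY_SIZE; i++)
 *		buff[i] = (uint32_t)rte_rand();
 */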
static bool
qede_check_vport_rss_enable(struct rte_eth_dev *eth_dev,
			    struct qed_update_vport_rss_params *rss_params)
{
	struct rte_eth_rss_conf rss_conf;
	enum rte_eth_rx_mq_mode mode = eth_dev->data->dev_conf.rxmode.mq_mode;
	struct qede_dev *qdev = eth_dev->data->dev_private;
	struct ecore_dev *edev = &qdev->edev;
	uint8_t rss_caps;
	unsigned int i;
	uint64_t hf;
	uint32_t *key;

	PMD_INIT_FUNC_TRACE(edev);

	rss_conf = eth_dev->data->dev_conf.rx_adv_conf.rss_conf;
	key = (uint32_t *)rss_conf.rss_key;
	hf = rss_conf.rss_hf;

	/* Check if RSS conditions are met.
	 * Note: Even though it's meaningless to enable RSS with one queue, it
	 * could be used to produce RSS Hash, so skipping that check.
	 */

	if (!(mode & ETH_MQ_RX_RSS)) {
		DP_INFO(edev, "RSS flag is not set\n");
		return false;
	}

	if (hf == 0) {
		DP_INFO(edev, "Request to disable RSS\n");
		return false;
	}

	memset(rss_params, 0, sizeof(*rss_params));

	for (i = 0; i < ECORE_RSS_IND_TABLE_SIZE; i++)
		rss_params->rss_ind_table[i] = qede_rxfh_indir_default(i,
						QEDE_RSS_COUNT(qdev));

	if (!key)
		qede_prandom_bytes(rss_params->rss_key,
				   sizeof(rss_params->rss_key));
	else
		memcpy(rss_params->rss_key, rss_conf.rss_key,
		       rss_conf.rss_key_len);

	qede_init_rss_caps(&rss_caps, hf);

	rss_params->rss_caps = rss_caps;

	DP_INFO(edev, "RSS conditions are met\n");

	return true;
}
static int qede_start_queues(struct rte_eth_dev *eth_dev, bool clear_stats)
{
	struct qede_dev *qdev = eth_dev->data->dev_private;
	struct ecore_dev *edev = &qdev->edev;
	struct ecore_queue_start_common_params q_params;
	struct qed_update_vport_rss_params *rss_params = &qdev->rss_params;
	struct qed_dev_info *qed_info = &qdev->dev_info.common;
	struct qed_update_vport_params vport_update_params;
	struct qede_tx_queue *txq;
	struct qede_fastpath *fp;
	dma_addr_t p_phys_table;
	int txq_index;
	uint16_t page_cnt;
	int vlan_removal_en = 1;
	int rc, tc, i;

	for_each_queue(i) {
		fp = &qdev->fp_array[i];
		if (fp->type & QEDE_FASTPATH_RX) {
			p_phys_table = ecore_chain_get_pbl_phys(&fp->rxq->
								rx_comp_ring);
			page_cnt = ecore_chain_get_page_cnt(&fp->rxq->
								rx_comp_ring);

			memset(&q_params, 0, sizeof(q_params));
			q_params.queue_id = i;
			q_params.vport_id = 0;
			q_params.sb = fp->sb_info->igu_sb_id;
			q_params.sb_idx = RX_PI;

			ecore_sb_ack(fp->sb_info, IGU_INT_DISABLE, 0);

			rc = qdev->ops->q_rx_start(edev, i, &q_params,
					   fp->rxq->rx_buf_size,
					   fp->rxq->rx_bd_ring.p_phys_addr,
					   p_phys_table,
					   page_cnt,
					   &fp->rxq->hw_rxq_prod_addr);
			if (rc) {
				DP_ERR(edev, "Start rxq #%d failed %d\n",
				       fp->rxq->queue_id, rc);
				return rc;
			}

			fp->rxq->hw_cons_ptr =
					&fp->sb_info->sb_virt->pi_array[RX_PI];

			qede_update_rx_prod(qdev, fp->rxq);
		}

		if (!(fp->type & QEDE_FASTPATH_TX))
			continue;

		for (tc = 0; tc < qdev->num_tc; tc++) {
			txq = fp->txqs[tc];
			txq_index = tc * QEDE_RSS_COUNT(qdev) + i;

			p_phys_table = ecore_chain_get_pbl_phys(&txq->tx_pbl);
			page_cnt = ecore_chain_get_page_cnt(&txq->tx_pbl);

			memset(&q_params, 0, sizeof(q_params));
			q_params.queue_id = txq->queue_id;
			q_params.vport_id = 0;
			q_params.sb = fp->sb_info->igu_sb_id;
			q_params.sb_idx = TX_PI(tc);

			rc = qdev->ops->q_tx_start(edev, i, &q_params,
						   p_phys_table,
						   page_cnt, /* **pp_doorbell */
						   &txq->doorbell_addr);
			if (rc) {
				DP_ERR(edev, "Start txq %u failed %d\n",
				       txq_index, rc);
				return rc;
			}

			txq->hw_cons_ptr =
			    &fp->sb_info->sb_virt->pi_array[TX_PI(tc)];
			SET_FIELD(txq->tx_db.data.params,
				  ETH_DB_DATA_DEST, DB_DEST_XCM);
			SET_FIELD(txq->tx_db.data.params, ETH_DB_DATA_AGG_CMD,
				  DB_AGG_CMD_SET);
			SET_FIELD(txq->tx_db.data.params,
				  ETH_DB_DATA_AGG_VAL_SEL,
				  DQ_XCM_ETH_TX_BD_PROD_CMD);

			txq->tx_db.data.agg_flags = DQ_XCM_ETH_DQ_CF_CMD;
		}
	}

	/* Prepare and send the vport enable */
	memset(&vport_update_params, 0, sizeof(vport_update_params));
	/* Update MTU via vport update */
	vport_update_params.mtu = qdev->mtu;
	vport_update_params.vport_id = 0;
	vport_update_params.update_vport_active_flg = 1;
	vport_update_params.vport_active_flg = 1;

	if (qed_info->mf_mode == MF_NPAR && qed_info->tx_switching) {
		/* TBD: Check SRIOV enabled for VF */
		vport_update_params.update_tx_switching_flg = 1;
		vport_update_params.tx_switching_flg = 1;
	}

	if (qede_check_vport_rss_enable(eth_dev, rss_params)) {
		vport_update_params.update_rss_flg = 1;
		qdev->rss_enabled = 1;
	} else {
		qdev->rss_enabled = 0;
	}

	rte_memcpy(&vport_update_params.rss_params, rss_params,
		   sizeof(*rss_params));

	rc = qdev->ops->vport_update(edev, &vport_update_params);
	if (rc) {
		DP_ERR(edev, "Update V-PORT failed %d\n", rc);
		return rc;
	}

	return 0;
}
#ifdef QEDE_TUNN_CSUM	/* hypothetical guard name; the original build switch
			 * did not survive extraction
			 */
static bool qede_tunn_exist(uint16_t flag)
{
	return !!((PARSING_AND_ERR_FLAGS_TUNNELEXIST_MASK <<
		   PARSING_AND_ERR_FLAGS_TUNNELEXIST_SHIFT) & flag);
}

static inline uint8_t qede_check_tunn_csum(uint16_t flag)
{
	uint16_t csum_flag = 0;
	uint8_t tcsum = 0;

	if ((PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMWASCALCULATED_MASK <<
	     PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMWASCALCULATED_SHIFT) & flag)
		csum_flag |= PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMERROR_MASK <<
			     PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMERROR_SHIFT;

	if ((PARSING_AND_ERR_FLAGS_L4CHKSMWASCALCULATED_MASK <<
	     PARSING_AND_ERR_FLAGS_L4CHKSMWASCALCULATED_SHIFT) & flag) {
		csum_flag |= PARSING_AND_ERR_FLAGS_L4CHKSMERROR_MASK <<
			     PARSING_AND_ERR_FLAGS_L4CHKSMERROR_SHIFT;
		tcsum = QEDE_TUNN_CSUM_UNNECESSARY;
	}

	csum_flag |= PARSING_AND_ERR_FLAGS_TUNNELIPHDRERROR_MASK <<
		     PARSING_AND_ERR_FLAGS_TUNNELIPHDRERROR_SHIFT |
		     PARSING_AND_ERR_FLAGS_IPHDRERROR_MASK <<
		     PARSING_AND_ERR_FLAGS_IPHDRERROR_SHIFT;

	if (csum_flag & flag)
		return QEDE_CSUM_ERROR;

	return QEDE_CSUM_UNNECESSARY | tcsum;
}
#else
static inline uint8_t qede_tunn_exist(uint16_t flag)
{
	return 0;
}

static inline uint8_t qede_check_tunn_csum(uint16_t flag)
{
	return 0;
}
#endif
static inline uint8_t qede_check_notunn_csum(uint16_t flag)
{
	uint16_t csum_flag = 0;
	uint8_t csum = 0;

	if ((PARSING_AND_ERR_FLAGS_L4CHKSMWASCALCULATED_MASK <<
	     PARSING_AND_ERR_FLAGS_L4CHKSMWASCALCULATED_SHIFT) & flag) {
		csum_flag |= PARSING_AND_ERR_FLAGS_L4CHKSMERROR_MASK <<
			     PARSING_AND_ERR_FLAGS_L4CHKSMERROR_SHIFT;
		csum = QEDE_CSUM_UNNECESSARY;
	}

	csum_flag |= PARSING_AND_ERR_FLAGS_IPHDRERROR_MASK <<
		     PARSING_AND_ERR_FLAGS_IPHDRERROR_SHIFT;

	if (csum_flag & flag)
		return QEDE_CSUM_ERROR;

	return csum;
}
static inline uint8_t qede_check_csum(uint16_t flag)
{
	if (likely(!qede_tunn_exist(flag)))
		return qede_check_notunn_csum(flag);

	return qede_check_tunn_csum(flag);
}
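/*
 * Editorial sketch with a hypothetical name, not in the original file:
 * every test in the checksum helpers above follows the same
 * "(MASK << SHIFT) & flags" pattern, i.e. it checks a bit-field of the CQE
 * parsing flags in place.
 */
static inline bool
qede_pars_flag_set(uint16_t flags, uint16_t mask, uint16_t shift)
{
	return !!((mask << shift) & flags);
}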
static inline void qede_rx_bd_ring_consume(struct qede_rx_queue *rxq)
{
	ecore_chain_consume(&rxq->rx_bd_ring);
	rxq->sw_rx_cons++;
}
static inline void
qede_reuse_page(struct qede_dev *qdev,
		struct qede_rx_queue *rxq, struct qede_rx_entry *curr_cons)
{
	struct eth_rx_bd *rx_bd_prod = ecore_chain_produce(&rxq->rx_bd_ring);
	uint16_t idx = rxq->sw_rx_cons & NUM_RX_BDS(rxq);
	struct qede_rx_entry *curr_prod;
	dma_addr_t new_mapping;

	curr_prod = &rxq->sw_rx_ring[idx];
	*curr_prod = *curr_cons;

	new_mapping = rte_mbuf_data_dma_addr_default(curr_prod->mbuf) +
		      curr_prod->page_offset;

	rx_bd_prod->addr.hi = rte_cpu_to_le_32(U64_HI(new_mapping));
	rx_bd_prod->addr.lo = rte_cpu_to_le_32(U64_LO(new_mapping));

	rxq->sw_rx_prod++;
}
static inline void
qede_recycle_rx_bd_ring(struct qede_rx_queue *rxq,
			struct qede_dev *qdev, uint8_t count)
{
	struct qede_rx_entry *curr_cons;

	for (; count > 0; count--) {
		curr_cons = &rxq->sw_rx_ring[rxq->sw_rx_cons &
					     NUM_RX_BDS(rxq)];
		qede_reuse_page(qdev, rxq, curr_cons);
		qede_rx_bd_ring_consume(rxq);
	}
}
static inline uint32_t qede_rx_cqe_to_pkt_type(uint16_t flags)
{
	uint32_t p_type = RTE_PTYPE_UNKNOWN;
	/* TBD - L4 indications needed ? */
	uint16_t protocol = ((PARSING_AND_ERR_FLAGS_L3TYPE_MASK <<
			      PARSING_AND_ERR_FLAGS_L3TYPE_SHIFT) & flags);

	/* protocol = 3 means LLC/SNAP over Ethernet */
	if (unlikely(protocol == 0 || protocol == 3))
		p_type = RTE_PTYPE_UNKNOWN;
	else if (protocol == 1)
		p_type = RTE_PTYPE_L3_IPV4;
	else if (protocol == 2)
		p_type = RTE_PTYPE_L3_IPV6;

	return RTE_PTYPE_L2_ETHER | p_type;
}
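/*
 * Editorial sketch, not the driver's code: the L3 type is a two-bit CQE
 * field, so the branch chain above can equivalently be written as a lookup
 * table. Shown only to document the encoding (0/3 unknown or LLC/SNAP,
 * 1 IPv4, 2 IPv6); assumes the ecore GET_FIELD() macro.
 */
static inline uint32_t
qede_rx_cqe_to_pkt_type_lut(uint16_t flags)
{
	static const uint32_t lut[4] = {
		RTE_PTYPE_UNKNOWN,	/* 0 - unknown */
		RTE_PTYPE_L3_IPV4,	/* 1 - IPv4 */
		RTE_PTYPE_L3_IPV6,	/* 2 - IPv6 */
		RTE_PTYPE_UNKNOWN,	/* 3 - LLC/SNAP over Ethernet */
	};

	return RTE_PTYPE_L2_ETHER |
	       lut[GET_FIELD(flags, PARSING_AND_ERR_FLAGS_L3TYPE)];
}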
int qede_process_sg_pkts(void *p_rxq, struct rte_mbuf *rx_mb,
			 int num_segs, uint16_t pkt_len)
{
	struct qede_rx_queue *rxq = p_rxq;
	struct qede_dev *qdev = rxq->qdev;
	struct ecore_dev *edev = &qdev->edev;
	uint16_t sw_rx_index, cur_size;

	register struct rte_mbuf *seg1 = NULL;
	register struct rte_mbuf *seg2 = NULL;

	seg1 = rx_mb;
	while (num_segs) {
		cur_size = pkt_len > rxq->rx_buf_size ?
				rxq->rx_buf_size : pkt_len;
		if (!cur_size) {
			PMD_RX_LOG(DEBUG, rxq,
				   "SG packet, len and num BD mismatch\n");
			qede_recycle_rx_bd_ring(rxq, qdev, num_segs);
			return -EINVAL;
		}

		if (qede_alloc_rx_buffer(rxq)) {
			uint8_t index;

			PMD_RX_LOG(DEBUG, rxq, "Buffer allocation failed\n");
			index = rxq->port_id;
			rte_eth_devices[index].data->rx_mbuf_alloc_failed++;
			rxq->rx_alloc_errors++;
			return -ENOMEM;
		}

		sw_rx_index = rxq->sw_rx_cons & NUM_RX_BDS(rxq);
		seg2 = rxq->sw_rx_ring[sw_rx_index].mbuf;
		qede_rx_bd_ring_consume(rxq);
		pkt_len -= cur_size;
		seg2->data_len = cur_size;
		seg1->next = seg2;
		seg1 = seg1->next;
		num_segs--;
	}
	seg1 = NULL;

	if (pkt_len)
		PMD_RX_LOG(DEBUG, rxq,
			   "Mapped all BDs of jumbo, but still have %d bytes\n",
			   pkt_len);

	return ECORE_SUCCESS;
}
uint16_t
qede_recv_pkts(void *p_rxq, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
{
	struct qede_rx_queue *rxq = p_rxq;
	struct qede_dev *qdev = rxq->qdev;
	struct ecore_dev *edev = &qdev->edev;
	struct qede_fastpath *fp = &qdev->fp_array[rxq->queue_id];
	uint16_t hw_comp_cons, sw_comp_cons, sw_rx_index;
	uint16_t rx_pkt = 0;
	union eth_rx_cqe *cqe;
	struct eth_fast_path_rx_reg_cqe *fp_cqe;
	register struct rte_mbuf *rx_mb = NULL;
	register struct rte_mbuf *seg1 = NULL;
	enum eth_rx_cqe_type cqe_type;
	uint16_t len, pad, preload_idx, pkt_len, parse_flag;
	uint8_t csum_flag, num_segs;
	enum rss_hash_type htype;
	int ret;

	hw_comp_cons = rte_le_to_cpu_16(*rxq->hw_cons_ptr);
	sw_comp_cons = ecore_chain_get_cons_idx(&rxq->rx_comp_ring);

	rte_rmb();

	if (hw_comp_cons == sw_comp_cons)
		return 0;

	while (sw_comp_cons != hw_comp_cons) {
		/* Get the CQE from the completion ring */
		cqe =
		    (union eth_rx_cqe *)ecore_chain_consume(&rxq->rx_comp_ring);
		cqe_type = cqe->fast_path_regular.type;

		if (unlikely(cqe_type == ETH_RX_CQE_TYPE_SLOW_PATH)) {
			PMD_RX_LOG(DEBUG, rxq, "Got a slowpath CQE\n");

			qdev->ops->eth_cqe_completion(edev, fp->id,
				(struct eth_slow_path_rx_cqe *)cqe);
			goto next_cqe;
		}

		/* Get the data from the SW ring */
		sw_rx_index = rxq->sw_rx_cons & NUM_RX_BDS(rxq);
		rx_mb = rxq->sw_rx_ring[sw_rx_index].mbuf;
		assert(rx_mb != NULL);

		fp_cqe = &cqe->fast_path_regular;

		len = rte_le_to_cpu_16(fp_cqe->len_on_first_bd);
		pad = fp_cqe->placement_offset;
		assert((len + pad) <= rx_mb->buf_len);

		PMD_RX_LOG(DEBUG, rxq,
			   "CQE type = 0x%x, flags = 0x%x, vlan = 0x%x"
			   " len = %u, parsing_flags = %d\n",
			   cqe_type, fp_cqe->bitfields,
			   rte_le_to_cpu_16(fp_cqe->vlan_tag),
			   len, rte_le_to_cpu_16(fp_cqe->pars_flags.flags));

		/* If this is an error packet then drop it */
		parse_flag =
		    rte_le_to_cpu_16(cqe->fast_path_regular.pars_flags.flags);
		csum_flag = qede_check_csum(parse_flag);
		if (unlikely(csum_flag == QEDE_CSUM_ERROR)) {
			PMD_RX_LOG(ERR, rxq,
				   "CQE in CONS = %u has error, flags = 0x%x "
				   "dropping incoming packet\n",
				   sw_comp_cons, parse_flag);
			qede_recycle_rx_bd_ring(rxq, qdev, fp_cqe->bd_num);
			goto next_cqe;
		}

		if (unlikely(qede_alloc_rx_buffer(rxq) != 0)) {
			PMD_RX_LOG(ERR, rxq,
				   "New buffer allocation failed,"
				   "dropping incoming packet\n");
			qede_recycle_rx_bd_ring(rxq, qdev, fp_cqe->bd_num);
			rte_eth_devices[rxq->port_id].
			    data->rx_mbuf_alloc_failed++;
			rxq->rx_alloc_errors++;
			break;
		}

		qede_rx_bd_ring_consume(rxq);

		if (fp_cqe->bd_num > 1) {
			pkt_len = rte_le_to_cpu_16(fp_cqe->pkt_len);
			num_segs = fp_cqe->bd_num - 1;

			pkt_len -= len;
			seg1 = rx_mb;
			ret = qede_process_sg_pkts(p_rxq, seg1, num_segs,
						   pkt_len);
			if (ret != ECORE_SUCCESS) {
				qede_recycle_rx_bd_ring(rxq, qdev,
							fp_cqe->bd_num);
				goto next_cqe;
			}
		}

		/* Prefetch next mbuf while processing current one. */
		preload_idx = rxq->sw_rx_cons & NUM_RX_BDS(rxq);
		rte_prefetch0(rxq->sw_rx_ring[preload_idx].mbuf);

		/* Update MBUF fields */
		rx_mb->ol_flags = 0;
		rx_mb->data_off = pad + RTE_PKTMBUF_HEADROOM;
		rx_mb->nb_segs = fp_cqe->bd_num;
		rx_mb->data_len = len;
		rx_mb->pkt_len = fp_cqe->pkt_len;
		rx_mb->port = rxq->port_id;
		rx_mb->packet_type = qede_rx_cqe_to_pkt_type(parse_flag);

		htype = (uint8_t)GET_FIELD(fp_cqe->bitfields,
				ETH_FAST_PATH_RX_REG_CQE_RSS_HASH_TYPE);
		if (qdev->rss_enabled && htype) {
			rx_mb->ol_flags |= PKT_RX_RSS_HASH;
			rx_mb->hash.rss = rte_le_to_cpu_32(fp_cqe->rss_hash);
			PMD_RX_LOG(DEBUG, rxq, "Hash result 0x%x\n",
				   rx_mb->hash.rss);
		}

		rte_prefetch1(rte_pktmbuf_mtod(rx_mb, void *));

		if (CQE_HAS_VLAN(parse_flag)) {
			rx_mb->vlan_tci = rte_le_to_cpu_16(fp_cqe->vlan_tag);
			rx_mb->ol_flags |= PKT_RX_VLAN_PKT;
		}

		if (CQE_HAS_OUTER_VLAN(parse_flag)) {
			/* FW does not provide indication of Outer VLAN tag,
			 * which is always stripped, so vlan_tci_outer is set
			 * to 0. Here vlan_tag represents inner VLAN tag.
			 */
			rx_mb->vlan_tci = rte_le_to_cpu_16(fp_cqe->vlan_tag);
			rx_mb->ol_flags |= PKT_RX_QINQ_PKT;
			rx_mb->vlan_tci_outer = 0;
		}

		rx_pkts[rx_pkt] = rx_mb;
		rx_pkt++;

next_cqe:
		ecore_chain_recycle_consumed(&rxq->rx_comp_ring);
		sw_comp_cons = ecore_chain_get_cons_idx(&rxq->rx_comp_ring);
		if (rx_pkt == nb_pkts) {
			PMD_RX_LOG(DEBUG, rxq,
				   "Budget reached nb_pkts=%u received=%u\n",
				   rx_pkt, nb_pkts);
			break;
		}
	}

	qede_update_rx_prod(qdev, rxq);

	rxq->rcv_pkts += rx_pkt;

	PMD_RX_LOG(DEBUG, rxq, "rx_pkts=%u core=%d\n", rx_pkt, rte_lcore_id());

	return rx_pkt;
}
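/*
 * Editorial sketch, not part of the driver: the receive loop above is the
 * PMD half of rte_eth_rx_burst(); a minimal polling caller looks like this.
 * Port and queue ids are hypothetical, and the mbufs are dropped straight
 * away just to keep the example self-contained.
 */
static __rte_unused void
qede_example_rx_poll(uint8_t port_id, struct rte_mbuf **pkts, uint16_t n)
{
	uint16_t nb_rx;

	nb_rx = rte_eth_rx_burst(port_id, 0, pkts, n);
	while (nb_rx)
		rte_pktmbuf_free(pkts[--nb_rx]);	/* consume and drop */
}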
static inline int
qede_free_tx_pkt(struct ecore_dev *edev, struct qede_tx_queue *txq)
{
	uint16_t nb_segs, idx = TX_CONS(txq);
	struct eth_tx_bd *tx_data_bd;
	struct rte_mbuf *mbuf = txq->sw_tx_ring[idx].mbuf;

	if (unlikely(!mbuf)) {
		PMD_TX_LOG(ERR, txq, "null mbuf\n");
		PMD_TX_LOG(ERR, txq,
			   "tx_desc %u tx_avail %u tx_cons %u tx_prod %u\n",
			   txq->nb_tx_desc, txq->nb_tx_avail, idx,
			   TX_PROD(txq));
		return -1;
	}

	nb_segs = mbuf->nb_segs;
	while (nb_segs) {
		/* It's like consuming rxbuf in recv() */
		ecore_chain_consume(&txq->tx_pbl);
		txq->nb_tx_avail++;
		nb_segs--;
	}
	rte_pktmbuf_free(mbuf);
	txq->sw_tx_ring[idx].mbuf = NULL;

	return 0;
}
static inline uint16_t
qede_process_tx_compl(struct ecore_dev *edev, struct qede_tx_queue *txq)
{
	uint16_t tx_compl = 0;
	uint16_t hw_bd_cons;

	hw_bd_cons = rte_le_to_cpu_16(*txq->hw_cons_ptr);
	rte_compiler_barrier();

	while (hw_bd_cons != ecore_chain_get_cons_idx(&txq->tx_pbl)) {
		if (qede_free_tx_pkt(edev, txq)) {
			PMD_TX_LOG(ERR, txq,
				   "hw_bd_cons = %u, chain_cons = %u\n",
				   hw_bd_cons,
				   ecore_chain_get_cons_idx(&txq->tx_pbl));
			break;
		}
		txq->sw_tx_cons++;	/* Making TXD available */
		tx_compl++;
	}

	PMD_TX_LOG(DEBUG, txq, "Tx compl %u sw_tx_cons %u avail %u\n",
		   tx_compl, txq->sw_tx_cons, txq->nb_tx_avail);

	return tx_compl;
}
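/*
 * Editorial note: completions are reaped lazily. There is no interrupt
 * path here; qede_process_tx_compl() is invoked from qede_xmit_pkts() when
 * nb_tx_avail falls below tx_free_thresh and again after the doorbell, and
 * from qede_drain_txq() while stopping, so sw_tx_cons only advances there.
 */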
/* Populate scatter gather buffer descriptor fields */
static inline uint16_t qede_encode_sg_bd(struct qede_tx_queue *p_txq,
					 struct rte_mbuf *m_seg,
					 uint16_t count,
					 struct eth_tx_1st_bd *bd1)
{
	struct qede_tx_queue *txq = p_txq;
	struct eth_tx_2nd_bd *bd2 = NULL;
	struct eth_tx_3rd_bd *bd3 = NULL;
	struct eth_tx_bd *tx_bd = NULL;
	uint16_t nb_segs = count;
	dma_addr_t mapping;

	/* Check for scattered buffers */
	while (m_seg) {
		if (nb_segs == 1) {
			bd2 = (struct eth_tx_2nd_bd *)
				ecore_chain_produce(&txq->tx_pbl);
			memset(bd2, 0, sizeof(*bd2));
			mapping = rte_mbuf_data_dma_addr(m_seg);
			bd2->addr.hi = rte_cpu_to_le_32(U64_HI(mapping));
			bd2->addr.lo = rte_cpu_to_le_32(U64_LO(mapping));
			bd2->nbytes = rte_cpu_to_le_16(m_seg->data_len);
		} else if (nb_segs == 2) {
			bd3 = (struct eth_tx_3rd_bd *)
				ecore_chain_produce(&txq->tx_pbl);
			memset(bd3, 0, sizeof(*bd3));
			mapping = rte_mbuf_data_dma_addr(m_seg);
			bd3->addr.hi = rte_cpu_to_le_32(U64_HI(mapping));
			bd3->addr.lo = rte_cpu_to_le_32(U64_LO(mapping));
			bd3->nbytes = rte_cpu_to_le_16(m_seg->data_len);
		} else {
			tx_bd = (struct eth_tx_bd *)
				ecore_chain_produce(&txq->tx_pbl);
			memset(tx_bd, 0, sizeof(*tx_bd));
			mapping = rte_mbuf_data_dma_addr(m_seg);
			tx_bd->addr.hi = rte_cpu_to_le_32(U64_HI(mapping));
			tx_bd->addr.lo = rte_cpu_to_le_32(U64_LO(mapping));
			tx_bd->nbytes = rte_cpu_to_le_16(m_seg->data_len);
		}
		nb_segs++;
		bd1->data.nbds = nb_segs;
		m_seg = m_seg->next;
	}

	/* Return total scattered buffers */
	return nb_segs;
}
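/*
 * Editorial note on the BD layout used above: the first BD of a packet is
 * an eth_tx_1st_bd (programmed in qede_xmit_pkts()), the second and third
 * BDs have their own types with extra offload fields, and every further
 * fragment uses the generic eth_tx_bd; bd1->data.nbds keeps the running
 * total so the firmware knows how many BDs belong to the packet.
 */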
uint16_t
qede_xmit_pkts(void *p_txq, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
{
	struct qede_tx_queue *txq = p_txq;
	struct qede_dev *qdev = txq->qdev;
	struct ecore_dev *edev = &qdev->edev;
	struct qede_fastpath *fp;
	struct eth_tx_1st_bd *bd1;
	struct rte_mbuf *m_seg = NULL;
	uint16_t nb_tx_pkts;
	uint16_t nb_pkt_sent = 0;
	uint16_t bd_prod;
	uint16_t idx;
	uint16_t tx_count;
	uint16_t nb_segs = 0;

	fp = &qdev->fp_array[QEDE_RSS_COUNT(qdev) + txq->queue_id];

	if (unlikely(txq->nb_tx_avail < txq->tx_free_thresh)) {
		PMD_TX_LOG(DEBUG, txq, "send=%u avail=%u free_thresh=%u\n",
			   nb_pkts, txq->nb_tx_avail, txq->tx_free_thresh);
		(void)qede_process_tx_compl(edev, txq);
	}

	nb_tx_pkts = RTE_MIN(nb_pkts, (txq->nb_tx_avail /
			ETH_TX_MAX_BDS_PER_NON_LSO_PACKET));
	if (unlikely(nb_tx_pkts == 0)) {
		PMD_TX_LOG(DEBUG, txq, "Out of BDs nb_pkts=%u avail=%u\n",
			   nb_pkts, txq->nb_tx_avail);
		return 0;
	}

	tx_count = nb_tx_pkts;
	while (nb_tx_pkts--) {
		/* Fill the entry in the SW ring and the BDs in the FW ring */
		idx = TX_PROD(txq);
		struct rte_mbuf *mbuf = *tx_pkts++;

		txq->sw_tx_ring[idx].mbuf = mbuf;
		bd1 = (struct eth_tx_1st_bd *)ecore_chain_produce(&txq->tx_pbl);
		/* Zero init struct fields */
		bd1->data.bd_flags.bitfields = 0;
		bd1->data.bitfields = 0;

		bd1->data.bd_flags.bitfields =
			1 << ETH_TX_1ST_BD_FLAGS_START_BD_SHIFT;
		/* Map MBUF linear data for DMA and set in the first BD */
		QEDE_BD_SET_ADDR_LEN(bd1, rte_mbuf_data_dma_addr(mbuf),
				     mbuf->data_len);

		/* Descriptor based VLAN insertion */
		if (mbuf->ol_flags & (PKT_TX_VLAN_PKT | PKT_TX_QINQ_PKT)) {
			bd1->data.vlan = rte_cpu_to_le_16(mbuf->vlan_tci);
			bd1->data.bd_flags.bitfields |=
				1 << ETH_TX_1ST_BD_FLAGS_VLAN_INSERTION_SHIFT;
		}

		/* Offload the IP checksum in the hardware */
		if (mbuf->ol_flags & PKT_TX_IP_CKSUM) {
			bd1->data.bd_flags.bitfields |=
				1 << ETH_TX_1ST_BD_FLAGS_IP_CSUM_SHIFT;
		}

		/* L4 checksum offload (tcp or udp) */
		if (mbuf->ol_flags & (PKT_TX_TCP_CKSUM | PKT_TX_UDP_CKSUM)) {
			bd1->data.bd_flags.bitfields |=
				1 << ETH_TX_1ST_BD_FLAGS_L4_CSUM_SHIFT;
			/* IPv6 + extn. -> later */
		}

		/* Handle fragmented MBUF */
		m_seg = mbuf->next;
		nb_segs++;
		bd1->data.nbds = nb_segs;
		/* Encode scatter gather buffer descriptors if required */
		nb_segs = qede_encode_sg_bd(txq, m_seg, nb_segs, bd1);
		txq->nb_tx_avail = txq->nb_tx_avail - nb_segs;
		nb_segs = 0;

		rte_prefetch0(txq->sw_tx_ring[TX_PROD(txq)].mbuf);
		bd_prod =
		    rte_cpu_to_le_16(ecore_chain_get_prod_idx(&txq->tx_pbl));
		nb_pkt_sent++;
	}

	/* Write value of prod idx into bd_prod */
	txq->tx_db.data.bd_prod = bd_prod;
	rte_wmb();
	rte_compiler_barrier();
	DIRECT_REG_WR(edev, txq->doorbell_addr, txq->tx_db.raw);
	rte_wmb();

	/* Check again for Tx completions */
	(void)qede_process_tx_compl(edev, txq);

	PMD_TX_LOG(DEBUG, txq, "to_send=%u can_send=%u sent=%u core=%d\n",
		   nb_pkts, tx_count, nb_pkt_sent, rte_lcore_id());

	return nb_pkt_sent;
}
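/*
 * Editorial sketch, not part of the driver: a typical caller of the burst
 * transmit path above. Port/queue ids are hypothetical; rte_eth_tx_burst()
 * may legitimately accept fewer packets than requested when the ring is
 * low on BDs, hence the retry loop.
 */
static __rte_unused void
qede_example_tx_flush(uint8_t port_id, struct rte_mbuf **pkts, uint16_t n)
{
	uint16_t sent = 0;

	while (sent < n)
		sent += rte_eth_tx_burst(port_id, 0, pkts + sent, n - sent);
}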
static void qede_init_fp_queue(struct rte_eth_dev *eth_dev)
{
	struct qede_dev *qdev = eth_dev->data->dev_private;
	struct qede_fastpath *fp;
	uint8_t i, rss_id, txq_index, tc;
	int rxq = 0, txq = 0;

	for_each_queue(i) {
		fp = &qdev->fp_array[i];
		if (fp->type & QEDE_FASTPATH_RX) {
			fp->rxq = eth_dev->data->rx_queues[i];
			fp->rxq->queue_id = rxq++;
		}

		if (fp->type & QEDE_FASTPATH_TX) {
			for (tc = 0; tc < qdev->num_tc; tc++) {
				txq_index = tc * QEDE_TSS_COUNT(qdev) + txq;
				fp->txqs[tc] =
					eth_dev->data->tx_queues[txq_index];
				fp->txqs[tc]->queue_id = txq_index;
			}
			txq++;
		}
	}
}
int qede_dev_start(struct rte_eth_dev *eth_dev)
{
	struct qede_dev *qdev = eth_dev->data->dev_private;
	struct ecore_dev *edev = &qdev->edev;
	struct qed_link_output link_output;
	struct qede_fastpath *fp;
	int rc;

	DP_INFO(edev, "Device state is %d\n", qdev->state);

	if (qdev->state == QEDE_DEV_START) {
		DP_INFO(edev, "Port is already started\n");
		return 0;
	}

	if (qdev->state == QEDE_DEV_CONFIG)
		qede_init_fp_queue(eth_dev);

	rc = qede_start_queues(eth_dev, true);
	if (rc) {
		DP_ERR(edev, "Failed to start queues\n");
		return rc;
	}

	/* Bring-up the link */
	qede_dev_set_link_state(eth_dev, true);

	if (qede_reset_fp_rings(qdev))
		return -ENOMEM;

	/* Start/resume traffic */
	qdev->ops->fastpath_start(edev);

	qdev->state = QEDE_DEV_START;

	DP_INFO(edev, "dev_state is QEDE_DEV_START\n");

	return 0;
}
static int qede_drain_txq(struct qede_dev *qdev,
			  struct qede_tx_queue *txq, bool allow_drain)
{
	struct ecore_dev *edev = &qdev->edev;
	int rc, cnt = 1000;

	while (txq->sw_tx_cons != txq->sw_tx_prod) {
		qede_process_tx_compl(edev, txq);
		if (!cnt) {
			if (allow_drain) {
				DP_NOTICE(edev, false,
					  "Tx queue[%u] is stuck,"
					  "requesting MCP to drain\n",
					  txq->queue_id);
				rc = qdev->ops->common->drain(edev);
				if (rc)
					return rc;
				return qede_drain_txq(qdev, txq, false);
			}

			DP_NOTICE(edev, false,
				  "Timeout waiting for tx queue[%d]:"
				  "PROD=%d, CONS=%d\n",
				  txq->queue_id, txq->sw_tx_prod,
				  txq->sw_tx_cons);
			return -ENODEV;
		}
		cnt--;
		DELAY(1000);
		rte_compiler_barrier();
	}

	/* FW finished processing, wait for HW to transmit all tx packets */
	DELAY(2000);

	return 0;
}
static int qede_stop_queues(struct qede_dev *qdev)
{
	struct qed_update_vport_params vport_update_params;
	struct ecore_dev *edev = &qdev->edev;
	int rc, tc, i;

	/* Disable the vport */
	memset(&vport_update_params, 0, sizeof(vport_update_params));
	vport_update_params.vport_id = 0;
	vport_update_params.update_vport_active_flg = 1;
	vport_update_params.vport_active_flg = 0;
	vport_update_params.update_rss_flg = 0;

	DP_INFO(edev, "Deactivate vport\n");

	rc = qdev->ops->vport_update(edev, &vport_update_params);
	if (rc) {
		DP_ERR(edev, "Failed to update vport\n");
		return rc;
	}

	DP_INFO(edev, "Flushing tx queues\n");

	/* Flush Tx queues. If needed, request drain from MCP */
	for_each_queue(i) {
		struct qede_fastpath *fp = &qdev->fp_array[i];

		if (fp->type & QEDE_FASTPATH_TX) {
			for (tc = 0; tc < qdev->num_tc; tc++) {
				struct qede_tx_queue *txq = fp->txqs[tc];

				rc = qede_drain_txq(qdev, txq, true);
				if (rc)
					return rc;
			}
		}
	}

	/* Stop all Queues in reverse order */
	for (i = QEDE_QUEUE_CNT(qdev) - 1; i >= 0; i--) {
		struct qed_stop_rxq_params rx_params;

		/* Stop the Tx Queue(s) */
		if (qdev->fp_array[i].type & QEDE_FASTPATH_TX) {
			for (tc = 0; tc < qdev->num_tc; tc++) {
				struct qed_stop_txq_params tx_params;
				uint8_t val;

				tx_params.rss_id = i;
				val = qdev->fp_array[i].txqs[tc]->queue_id;
				tx_params.tx_queue_id = val;

				DP_INFO(edev, "Stopping tx queues\n");
				rc = qdev->ops->q_tx_stop(edev, &tx_params);
				if (rc) {
					DP_ERR(edev, "Failed to stop TXQ #%d\n",
					       tx_params.tx_queue_id);
					return rc;
				}
			}
		}

		/* Stop the Rx Queue */
		if (qdev->fp_array[i].type & QEDE_FASTPATH_RX) {
			memset(&rx_params, 0, sizeof(rx_params));
			rx_params.rss_id = i;
			rx_params.rx_queue_id = qdev->fp_array[i].rxq->queue_id;
			rx_params.eq_completion_only = 1;

			DP_INFO(edev, "Stopping rx queues\n");

			rc = qdev->ops->q_rx_stop(edev, &rx_params);
			if (rc) {
				DP_ERR(edev, "Failed to stop RXQ #%d\n", i);
				return rc;
			}
		}
	}

	return 0;
}
int qede_reset_fp_rings(struct qede_dev *qdev)
{
	struct qede_fastpath *fp;
	struct qede_tx_queue *txq;
	uint8_t tc;
	uint16_t id, i;

	for_each_queue(id) {
		fp = &qdev->fp_array[id];

		if (fp->type & QEDE_FASTPATH_RX) {
			DP_INFO(&qdev->edev,
				"Reset FP chain for RSS %u\n", id);
			qede_rx_queue_release_mbufs(fp->rxq);
			ecore_chain_reset(&fp->rxq->rx_bd_ring);
			ecore_chain_reset(&fp->rxq->rx_comp_ring);
			fp->rxq->sw_rx_prod = 0;
			fp->rxq->sw_rx_cons = 0;
			*fp->rxq->hw_cons_ptr = 0;
			for (i = 0; i < fp->rxq->nb_rx_desc; i++) {
				if (qede_alloc_rx_buffer(fp->rxq)) {
					DP_ERR(&qdev->edev,
					       "RX buffer allocation failed\n");
					return -ENOMEM;
				}
			}
		}
		if (fp->type & QEDE_FASTPATH_TX) {
			for (tc = 0; tc < qdev->num_tc; tc++) {
				txq = fp->txqs[tc];
				qede_tx_queue_release_mbufs(txq);
				ecore_chain_reset(&txq->tx_pbl);
				txq->sw_tx_cons = 0;
				txq->sw_tx_prod = 0;
				*txq->hw_cons_ptr = 0;
			}
		}
	}

	return 0;
}
/* This function frees all memory of a single fp */
void qede_free_mem_load(struct rte_eth_dev *eth_dev)
{
	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
	struct qede_fastpath *fp;
	uint16_t txq_idx;
	uint8_t id, tc;

	for_each_queue(id) {
		fp = &qdev->fp_array[id];
		if (fp->type & QEDE_FASTPATH_RX) {
			qede_rx_queue_release(fp->rxq);
			eth_dev->data->rx_queues[id] = NULL;
		} else {
			for (tc = 0; tc < qdev->num_tc; tc++) {
				txq_idx = fp->txqs[tc]->queue_id;
				qede_tx_queue_release(fp->txqs[tc]);
				eth_dev->data->tx_queues[txq_idx] = NULL;
			}
		}
	}
}
void qede_dev_stop(struct rte_eth_dev *eth_dev)
{
	struct qede_dev *qdev = eth_dev->data->dev_private;
	struct ecore_dev *edev = &qdev->edev;

	DP_INFO(edev, "port %u\n", eth_dev->data->port_id);

	if (qdev->state != QEDE_DEV_START) {
		DP_INFO(edev, "Device not yet started\n");
		return;
	}

	if (qede_stop_queues(qdev))
		DP_ERR(edev, "Didn't succeed to close queues\n");

	DP_INFO(edev, "Stopped queues\n");

	qdev->ops->fastpath_stop(edev);

	/* Bring the link down */
	qede_dev_set_link_state(eth_dev, false);

	qdev->state = QEDE_DEV_STOP;

	DP_INFO(edev, "dev_state is QEDE_DEV_STOP\n");
}