/* SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 2016-2018 Solarflare Communications Inc.
 * All rights reserved.
 *
 * This software was jointly developed between OKTET Labs (under contract
 * for Solarflare) and Solarflare Communications, Inc.
 */

#include <rte_mempool.h>

#include "efx.h"

#include "sfc.h"
#include "sfc_debug.h"
#include "sfc_log.h"
#include "sfc_ev.h"
#include "sfc_rx.h"
#include "sfc_kvargs.h"
#include "sfc_tweak.h"
/*
 * Maximum number of Rx queue flush attempts in the case of failure or
 * flush timeout
 */
#define SFC_RX_QFLUSH_ATTEMPTS		(3)

/*
 * Time to wait between event queue polling attempts when waiting for Rx
 * queue flush done or failed events.
 */
#define SFC_RX_QFLUSH_POLL_WAIT_MS	(1)

/*
 * Maximum number of event queue polling attempts when waiting for Rx queue
 * flush done or failed events. It defines Rx queue flush attempt timeout
 * together with SFC_RX_QFLUSH_POLL_WAIT_MS.
 */
#define SFC_RX_QFLUSH_POLL_ATTEMPTS	(2000)
static void
sfc_rx_qflush_done(struct sfc_rxq_info *rxq_info)
{
	rxq_info->state |= SFC_RXQ_FLUSHED;
	rxq_info->state &= ~SFC_RXQ_FLUSHING;
}
static void
sfc_rx_qflush_failed(struct sfc_rxq_info *rxq_info)
{
	rxq_info->state |= SFC_RXQ_FLUSH_FAILED;
	rxq_info->state &= ~SFC_RXQ_FLUSHING;
}
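
/*
 * Refill the Rx ring from the mbuf pool in bulks of SFC_RX_REFILL_BULK
 * descriptors. Refill is skipped while the free space in the ring is
 * below the refill threshold.
 */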
static void
sfc_efx_rx_qrefill(struct sfc_efx_rxq *rxq)
{
	unsigned int free_space;
	unsigned int bulks;
	void *objs[SFC_RX_REFILL_BULK];
	efsys_dma_addr_t addr[RTE_DIM(objs)];
	unsigned int added = rxq->added;
	unsigned int id;
	unsigned int i;
	struct sfc_efx_rx_sw_desc *rxd;
	struct rte_mbuf *m;
	uint16_t port_id = rxq->dp.dpq.port_id;

	free_space = rxq->max_fill_level - (added - rxq->completed);

	if (free_space < rxq->refill_threshold)
		return;

	bulks = free_space / RTE_DIM(objs);
	/* refill_threshold guarantees that bulks is positive */
	SFC_ASSERT(bulks > 0);

	id = added & rxq->ptr_mask;
	do {
		if (unlikely(rte_mempool_get_bulk(rxq->refill_mb_pool, objs,
						  RTE_DIM(objs)) < 0)) {
			/*
			 * It is hardly a safe way to increment counter
			 * from different contexts, but all PMDs do it.
			 */
			rxq->evq->sa->eth_dev->data->rx_mbuf_alloc_failed +=
				RTE_DIM(objs);
			/* Return if we have posted nothing yet */
			if (added == rxq->added)
				return;
			/* Push posted */
			break;
		}

		for (i = 0; i < RTE_DIM(objs);
		     ++i, id = (id + 1) & rxq->ptr_mask) {
			m = objs[i];

			MBUF_RAW_ALLOC_CHECK(m);

			rxd = &rxq->sw_desc[id];
			rxd->mbuf = m;

			m->data_off = RTE_PKTMBUF_HEADROOM;
			m->port = port_id;

			addr[i] = rte_pktmbuf_iova(m);
		}

		efx_rx_qpost(rxq->common, addr, rxq->buf_size,
			     RTE_DIM(objs), rxq->completed, added);
		added += RTE_DIM(objs);
	} while (--bulks > 0);

	SFC_ASSERT(added != rxq->added);
	rxq->added = added;
	efx_rx_qpush(rxq->common, added, &rxq->pushed);
}
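
/*
 * Translate libefx Rx descriptor flags into mbuf offload flags which
 * report IPv4 and TCP/UDP checksum validation results.
 */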
static uint64_t
sfc_efx_rx_desc_flags_to_offload_flags(const unsigned int desc_flags)
{
	uint64_t mbuf_flags = 0;

	switch (desc_flags & (EFX_PKT_IPV4 | EFX_CKSUM_IPV4)) {
	case (EFX_PKT_IPV4 | EFX_CKSUM_IPV4):
		mbuf_flags |= PKT_RX_IP_CKSUM_GOOD;
		break;
	case EFX_PKT_IPV4:
		mbuf_flags |= PKT_RX_IP_CKSUM_BAD;
		break;
	default:
		RTE_BUILD_BUG_ON(PKT_RX_IP_CKSUM_UNKNOWN != 0);
		SFC_ASSERT((mbuf_flags & PKT_RX_IP_CKSUM_MASK) ==
			   PKT_RX_IP_CKSUM_UNKNOWN);
		break;
	}

	switch ((desc_flags &
		 (EFX_PKT_TCP | EFX_PKT_UDP | EFX_CKSUM_TCPUDP))) {
	case (EFX_PKT_TCP | EFX_CKSUM_TCPUDP):
	case (EFX_PKT_UDP | EFX_CKSUM_TCPUDP):
		mbuf_flags |= PKT_RX_L4_CKSUM_GOOD;
		break;
	case EFX_PKT_TCP:
	case EFX_PKT_UDP:
		mbuf_flags |= PKT_RX_L4_CKSUM_BAD;
		break;
	default:
		RTE_BUILD_BUG_ON(PKT_RX_L4_CKSUM_UNKNOWN != 0);
		SFC_ASSERT((mbuf_flags & PKT_RX_L4_CKSUM_MASK) ==
			   PKT_RX_L4_CKSUM_UNKNOWN);
		break;
	}

	return mbuf_flags;
}
static uint32_t
sfc_efx_rx_desc_flags_to_packet_type(const unsigned int desc_flags)
{
	return RTE_PTYPE_L2_ETHER |
		((desc_flags & EFX_PKT_IPV4) ?
			RTE_PTYPE_L3_IPV4_EXT_UNKNOWN : 0) |
		((desc_flags & EFX_PKT_IPV6) ?
			RTE_PTYPE_L3_IPV6_EXT_UNKNOWN : 0) |
		((desc_flags & EFX_PKT_TCP) ? RTE_PTYPE_L4_TCP : 0) |
		((desc_flags & EFX_PKT_UDP) ? RTE_PTYPE_L4_UDP : 0);
}
static const uint32_t *
sfc_efx_supported_ptypes_get(__rte_unused uint32_t tunnel_encaps)
{
	static const uint32_t ptypes[] = {
		RTE_PTYPE_L2_ETHER,
		RTE_PTYPE_L3_IPV4_EXT_UNKNOWN,
		RTE_PTYPE_L3_IPV6_EXT_UNKNOWN,
		RTE_PTYPE_L4_TCP,
		RTE_PTYPE_L4_UDP,
		RTE_PTYPE_UNKNOWN
	};

	return ptypes;
}
static void
sfc_efx_rx_set_rss_hash(struct sfc_efx_rxq *rxq, unsigned int flags,
			struct rte_mbuf *m)
{
	uint8_t *mbuf_data;

	if ((rxq->flags & SFC_EFX_RXQ_FLAG_RSS_HASH) == 0)
		return;

	mbuf_data = rte_pktmbuf_mtod(m, uint8_t *);

	if (flags & (EFX_PKT_IPV4 | EFX_PKT_IPV6)) {
		m->hash.rss = efx_pseudo_hdr_hash_get(rxq->common,
						      EFX_RX_HASHALG_TOEPLITZ,
						      mbuf_data);

		m->ol_flags |= PKT_RX_RSS_HASH;
	}
}
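
/*
 * libefx-based Rx burst callback: polls the event queue, completes
 * pending descriptors (chaining fragments of scattered packets) and
 * refills the ring before returning the received packets.
 */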
static uint16_t
sfc_efx_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
{
	struct sfc_dp_rxq *dp_rxq = rx_queue;
	struct sfc_efx_rxq *rxq = sfc_efx_rxq_by_dp_rxq(dp_rxq);
	unsigned int completed;
	unsigned int prefix_size = rxq->prefix_size;
	unsigned int done_pkts = 0;
	boolean_t discard_next = B_FALSE;
	struct rte_mbuf *scatter_pkt = NULL;

	if (unlikely((rxq->flags & SFC_EFX_RXQ_FLAG_RUNNING) == 0))
		return 0;

	sfc_ev_qpoll(rxq->evq);

	completed = rxq->completed;
	while (completed != rxq->pending && done_pkts < nb_pkts) {
		unsigned int id;
		struct sfc_efx_rx_sw_desc *rxd;
		struct rte_mbuf *m;
		unsigned int seg_len;
		unsigned int desc_flags;

		id = completed++ & rxq->ptr_mask;
		rxd = &rxq->sw_desc[id];
		m = rxd->mbuf;
		desc_flags = rxd->flags;

		if (discard_next)
			goto discard;

		if (desc_flags & (EFX_ADDR_MISMATCH | EFX_DISCARD))
			goto discard;

		if (desc_flags & EFX_PKT_PREFIX_LEN) {
			uint16_t tmp_size;
			int rc __rte_unused;

			rc = efx_pseudo_hdr_pkt_length_get(rxq->common,
				rte_pktmbuf_mtod(m, uint8_t *), &tmp_size);
			SFC_ASSERT(rc == 0);
			seg_len = tmp_size;
		} else {
			seg_len = rxd->size - prefix_size;
		}

		rte_pktmbuf_data_len(m) = seg_len;
		rte_pktmbuf_pkt_len(m) = seg_len;

		if (scatter_pkt != NULL) {
			if (rte_pktmbuf_chain(scatter_pkt, m) != 0) {
				rte_pktmbuf_free(scatter_pkt);
				goto discard;
			}
			/* The packet to deliver */
			m = scatter_pkt;
		}

		if (desc_flags & EFX_PKT_CONT) {
			/* The packet is scattered, more fragments to come */
			scatter_pkt = m;
			/* Further fragments have no prefix */
			prefix_size = 0;
			continue;
		}

		/* Scattered packet is done */
		scatter_pkt = NULL;
		/* The first fragment of the packet has prefix */
		prefix_size = rxq->prefix_size;

		m->ol_flags =
			sfc_efx_rx_desc_flags_to_offload_flags(desc_flags);
		m->packet_type =
			sfc_efx_rx_desc_flags_to_packet_type(desc_flags);

		/*
		 * Extract RSS hash from the packet prefix and
		 * set the corresponding field (if needed and possible)
		 */
		sfc_efx_rx_set_rss_hash(rxq, desc_flags, m);

		m->data_off += prefix_size;

		*rx_pkts++ = m;
		done_pkts++;
		continue;

discard:
		discard_next = ((desc_flags & EFX_PKT_CONT) != 0);
		rte_mbuf_raw_free(m);
		rxd->mbuf = NULL;
	}

	/* pending is only moved when entire packet is received */
	SFC_ASSERT(scatter_pkt == NULL);

	rxq->completed = completed;

	sfc_efx_rx_qrefill(rxq);

	return done_pkts;
}
static sfc_dp_rx_qdesc_npending_t sfc_efx_rx_qdesc_npending;
static unsigned int
sfc_efx_rx_qdesc_npending(struct sfc_dp_rxq *dp_rxq)
{
	struct sfc_efx_rxq *rxq = sfc_efx_rxq_by_dp_rxq(dp_rxq);

	if ((rxq->flags & SFC_EFX_RXQ_FLAG_RUNNING) == 0)
		return 0;

	sfc_ev_qpoll(rxq->evq);

	return rxq->pending - rxq->completed;
}
static sfc_dp_rx_qdesc_status_t sfc_efx_rx_qdesc_status;
static int
sfc_efx_rx_qdesc_status(struct sfc_dp_rxq *dp_rxq, uint16_t offset)
{
	struct sfc_efx_rxq *rxq = sfc_efx_rxq_by_dp_rxq(dp_rxq);

	if (unlikely(offset > rxq->ptr_mask))
		return -EINVAL;

	/*
	 * Poll EvQ to derive up-to-date 'rxq->pending' figure;
	 * it is required for the queue to be running, but the
	 * check is omitted because API design assumes that it
	 * is the duty of the caller to satisfy all conditions
	 */
	SFC_ASSERT((rxq->flags & SFC_EFX_RXQ_FLAG_RUNNING) ==
		   SFC_EFX_RXQ_FLAG_RUNNING);
	sfc_ev_qpoll(rxq->evq);

	/*
	 * There is a handful of reserved entries in the ring,
	 * but an explicit check whether the offset points to
	 * a reserved entry is neglected since the two checks
	 * below rely on the figures which take the HW limits
	 * into account and thus if an entry is reserved, the
	 * checks will fail and UNAVAIL code will be returned
	 */

	if (offset < (rxq->pending - rxq->completed))
		return RTE_ETH_RX_DESC_DONE;

	if (offset < (rxq->added - rxq->completed))
		return RTE_ETH_RX_DESC_AVAIL;

	return RTE_ETH_RX_DESC_UNAVAIL;
}
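
/*
 * Verify that a single Rx buffer can accommodate the maximum packet
 * (PDU plus Rx prefix) when Rx scatter is disabled.
 */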
boolean_t
sfc_rx_check_scatter(size_t pdu, size_t rx_buf_size, uint32_t rx_prefix_size,
		     boolean_t rx_scatter_enabled, const char **error)
{
	if ((rx_buf_size < pdu + rx_prefix_size) && !rx_scatter_enabled) {
		*error = "Rx scatter is disabled and RxQ mbuf pool object size is too small";
		return B_FALSE;
	}

	return B_TRUE;
}
/** Get Rx datapath ops by the datapath RxQ handle */
const struct sfc_dp_rx *
sfc_dp_rx_by_dp_rxq(const struct sfc_dp_rxq *dp_rxq)
{
	const struct sfc_dp_queue *dpq = &dp_rxq->dpq;
	struct rte_eth_dev *eth_dev;
	struct sfc_adapter_priv *sap;

	SFC_ASSERT(rte_eth_dev_is_valid_port(dpq->port_id));
	eth_dev = &rte_eth_devices[dpq->port_id];

	sap = sfc_adapter_priv_by_eth_dev(eth_dev);

	return sap->dp_rx;
}
struct sfc_rxq_info *
sfc_rxq_info_by_dp_rxq(const struct sfc_dp_rxq *dp_rxq)
{
	const struct sfc_dp_queue *dpq = &dp_rxq->dpq;
	struct rte_eth_dev *eth_dev;
	struct sfc_adapter_shared *sas;

	SFC_ASSERT(rte_eth_dev_is_valid_port(dpq->port_id));
	eth_dev = &rte_eth_devices[dpq->port_id];

	sas = sfc_adapter_shared_by_eth_dev(eth_dev);

	SFC_ASSERT(dpq->queue_id < sas->rxq_count);
	return &sas->rxq_info[dpq->queue_id];
}
struct sfc_rxq *
sfc_rxq_by_dp_rxq(const struct sfc_dp_rxq *dp_rxq)
{
	const struct sfc_dp_queue *dpq = &dp_rxq->dpq;
	struct rte_eth_dev *eth_dev;
	struct sfc_adapter *sa;

	SFC_ASSERT(rte_eth_dev_is_valid_port(dpq->port_id));
	eth_dev = &rte_eth_devices[dpq->port_id];

	sa = sfc_adapter_by_eth_dev(eth_dev);

	SFC_ASSERT(dpq->queue_id < sfc_sa2shared(sa)->rxq_count);
	return &sa->rxq_ctrl[dpq->queue_id];
}
static sfc_dp_rx_qsize_up_rings_t sfc_efx_rx_qsize_up_rings;
static int
sfc_efx_rx_qsize_up_rings(uint16_t nb_rx_desc,
			  __rte_unused struct sfc_dp_rx_hw_limits *limits,
			  __rte_unused struct rte_mempool *mb_pool,
			  unsigned int *rxq_entries,
			  unsigned int *evq_entries,
			  unsigned int *rxq_max_fill_level)
{
	*rxq_entries = nb_rx_desc;
	*evq_entries = nb_rx_desc;
	*rxq_max_fill_level = EFX_RXQ_LIMIT(*rxq_entries);
	return 0;
}
static sfc_dp_rx_qcreate_t sfc_efx_rx_qcreate;
static int
sfc_efx_rx_qcreate(uint16_t port_id, uint16_t queue_id,
		   const struct rte_pci_addr *pci_addr, int socket_id,
		   const struct sfc_dp_rx_qcreate_info *info,
		   struct sfc_dp_rxq **dp_rxqp)
{
	struct sfc_efx_rxq *rxq;
	int rc;

	rc = ENOMEM;
	rxq = rte_zmalloc_socket("sfc-efx-rxq", sizeof(*rxq),
				 RTE_CACHE_LINE_SIZE, socket_id);
	if (rxq == NULL)
		goto fail_rxq_alloc;

	sfc_dp_queue_init(&rxq->dp.dpq, port_id, queue_id, pci_addr);

	rc = ENOMEM;
	rxq->sw_desc = rte_calloc_socket("sfc-efx-rxq-sw_desc",
					 info->rxq_entries,
					 sizeof(*rxq->sw_desc),
					 RTE_CACHE_LINE_SIZE, socket_id);
	if (rxq->sw_desc == NULL)
		goto fail_desc_alloc;

	/* efx datapath is bound to efx control path */
	rxq->evq = sfc_rxq_by_dp_rxq(&rxq->dp)->evq;
	if (info->flags & SFC_RXQ_FLAG_RSS_HASH)
		rxq->flags |= SFC_EFX_RXQ_FLAG_RSS_HASH;
	rxq->ptr_mask = info->rxq_entries - 1;
	rxq->batch_max = info->batch_max;
	rxq->prefix_size = info->prefix_size;
	rxq->max_fill_level = info->max_fill_level;
	rxq->refill_threshold = info->refill_threshold;
	rxq->buf_size = info->buf_size;
	rxq->refill_mb_pool = info->refill_mb_pool;

	*dp_rxqp = &rxq->dp;
	return 0;

fail_desc_alloc:
	rte_free(rxq);

fail_rxq_alloc:
	return rc;
}
static sfc_dp_rx_qdestroy_t sfc_efx_rx_qdestroy;
static void
sfc_efx_rx_qdestroy(struct sfc_dp_rxq *dp_rxq)
{
	struct sfc_efx_rxq *rxq = sfc_efx_rxq_by_dp_rxq(dp_rxq);

	rte_free(rxq->sw_desc);
	rte_free(rxq);
}
static sfc_dp_rx_qstart_t sfc_efx_rx_qstart;
static int
sfc_efx_rx_qstart(struct sfc_dp_rxq *dp_rxq,
		  __rte_unused unsigned int evq_read_ptr)
{
	/* libefx-based datapath is specific to libefx-based PMD */
	struct sfc_efx_rxq *rxq = sfc_efx_rxq_by_dp_rxq(dp_rxq);
	struct sfc_rxq *crxq = sfc_rxq_by_dp_rxq(dp_rxq);

	rxq->common = crxq->common;

	rxq->pending = rxq->completed = rxq->added = rxq->pushed = 0;

	sfc_efx_rx_qrefill(rxq);

	rxq->flags |= (SFC_EFX_RXQ_FLAG_STARTED | SFC_EFX_RXQ_FLAG_RUNNING);

	return 0;
}
static sfc_dp_rx_qstop_t sfc_efx_rx_qstop;
static void
sfc_efx_rx_qstop(struct sfc_dp_rxq *dp_rxq,
		 __rte_unused unsigned int *evq_read_ptr)
{
	struct sfc_efx_rxq *rxq = sfc_efx_rxq_by_dp_rxq(dp_rxq);

	rxq->flags &= ~SFC_EFX_RXQ_FLAG_RUNNING;

	/* libefx-based datapath is bound to libefx-based PMD and uses
	 * event queue structure directly. So, there is no necessity to
	 * return EvQ read pointer.
	 */
}
static sfc_dp_rx_qpurge_t sfc_efx_rx_qpurge;
static void
sfc_efx_rx_qpurge(struct sfc_dp_rxq *dp_rxq)
{
	struct sfc_efx_rxq *rxq = sfc_efx_rxq_by_dp_rxq(dp_rxq);
	unsigned int i;
	struct sfc_efx_rx_sw_desc *rxd;

	for (i = rxq->completed; i != rxq->added; ++i) {
		rxd = &rxq->sw_desc[i & rxq->ptr_mask];
		rte_mbuf_raw_free(rxd->mbuf);
		rxd->mbuf = NULL;
		/* Packed stream relies on 0 in inactive SW desc.
		 * Rx queue stop is not performance critical, so
		 * there is no harm to do it always.
		 */
		rxd->flags = 0;
		rxd->size = 0;
	}

	rxq->flags &= ~SFC_EFX_RXQ_FLAG_STARTED;
}
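
/* libefx-based Rx datapath operations */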
struct sfc_dp_rx sfc_efx_rx = {
	.dp = {
		.name		= SFC_KVARG_DATAPATH_EFX,
		.type		= SFC_DP_RX,
		.hw_fw_caps	= 0,
	},
	.features		= SFC_DP_RX_FEAT_SCATTER |
				  SFC_DP_RX_FEAT_CHECKSUM,
	.qsize_up_rings		= sfc_efx_rx_qsize_up_rings,
	.qcreate		= sfc_efx_rx_qcreate,
	.qdestroy		= sfc_efx_rx_qdestroy,
	.qstart			= sfc_efx_rx_qstart,
	.qstop			= sfc_efx_rx_qstop,
	.qpurge			= sfc_efx_rx_qpurge,
	.supported_ptypes_get	= sfc_efx_supported_ptypes_get,
	.qdesc_npending		= sfc_efx_rx_qdesc_npending,
	.qdesc_status		= sfc_efx_rx_qdesc_status,
	.pkt_burst		= sfc_efx_recv_pkts,
};
static void
sfc_rx_qflush(struct sfc_adapter *sa, unsigned int sw_index)
{
	struct sfc_rxq_info *rxq_info;
	struct sfc_rxq *rxq;
	unsigned int retry_count;
	unsigned int wait_count;
	int rc;

	rxq_info = &sfc_sa2shared(sa)->rxq_info[sw_index];
	SFC_ASSERT(rxq_info->state & SFC_RXQ_STARTED);

	rxq = &sa->rxq_ctrl[sw_index];

	/*
	 * Retry Rx queue flushing in the case of flush failed or
	 * timeout. In the worst case it can delay for 6 seconds.
	 */
	for (retry_count = 0;
	     ((rxq_info->state & SFC_RXQ_FLUSHED) == 0) &&
	     (retry_count < SFC_RX_QFLUSH_ATTEMPTS);
	     ++retry_count) {
		rc = efx_rx_qflush(rxq->common);
		if (rc != 0) {
			rxq_info->state |= (rc == EALREADY) ?
				SFC_RXQ_FLUSHED : SFC_RXQ_FLUSH_FAILED;
			break;
		}
		rxq_info->state &= ~SFC_RXQ_FLUSH_FAILED;
		rxq_info->state |= SFC_RXQ_FLUSHING;

		/*
		 * Wait for Rx queue flush done or failed event at least
		 * SFC_RX_QFLUSH_POLL_WAIT_MS milliseconds and not more
		 * than 2 seconds (SFC_RX_QFLUSH_POLL_WAIT_MS multiplied
		 * by SFC_RX_QFLUSH_POLL_ATTEMPTS).
		 */
		wait_count = 0;
		do {
			rte_delay_ms(SFC_RX_QFLUSH_POLL_WAIT_MS);
			sfc_ev_qpoll(rxq->evq);
		} while ((rxq_info->state & SFC_RXQ_FLUSHING) &&
			 (wait_count++ < SFC_RX_QFLUSH_POLL_ATTEMPTS));

		if (rxq_info->state & SFC_RXQ_FLUSHING)
			sfc_err(sa, "RxQ %u flush timed out", sw_index);

		if (rxq_info->state & SFC_RXQ_FLUSH_FAILED)
			sfc_err(sa, "RxQ %u flush failed", sw_index);

		if (rxq_info->state & SFC_RXQ_FLUSHED)
			sfc_notice(sa, "RxQ %u flushed", sw_index);
	}

	sa->priv.dp_rx->qpurge(rxq_info->dp);
}
static int
sfc_rx_default_rxq_set_filter(struct sfc_adapter *sa, struct sfc_rxq *rxq)
{
	struct sfc_rss *rss = &sfc_sa2shared(sa)->rss;
	boolean_t need_rss = (rss->channels > 0) ? B_TRUE : B_FALSE;
	struct sfc_port *port = &sa->port;
	int rc;

	/*
	 * If promiscuous or all-multicast mode has been requested, setting
	 * filter for the default Rx queue might fail, in particular, while
	 * running over PCI function which is not a member of corresponding
	 * privilege groups; if this occurs, few iterations will be made to
	 * repeat this step without promiscuous and all-multicast flags set
	 */
retry:
	rc = efx_mac_filter_default_rxq_set(sa->nic, rxq->common, need_rss);
	if (rc == 0)
		return 0;
	else if (rc != EOPNOTSUPP)
		return rc;

	if (port->promisc) {
		sfc_warn(sa, "promiscuous mode has been requested, "
			     "but the HW rejects it");
		sfc_warn(sa, "promiscuous mode will be disabled");

		port->promisc = B_FALSE;
		rc = sfc_set_rx_mode(sa);
		if (rc != 0)
			return rc;

		goto retry;
	}

	if (port->allmulti) {
		sfc_warn(sa, "all-multicast mode has been requested, "
			     "but the HW rejects it");
		sfc_warn(sa, "all-multicast mode will be disabled");

		port->allmulti = B_FALSE;
		rc = sfc_set_rx_mode(sa);
		if (rc != 0)
			return rc;

		goto retry;
	}

	return rc;
}
int
sfc_rx_qstart(struct sfc_adapter *sa, unsigned int sw_index)
{
	struct sfc_rxq_info *rxq_info;
	struct sfc_rxq *rxq;
	struct sfc_evq *evq;
	int rc;

	sfc_log_init(sa, "sw_index=%u", sw_index);

	SFC_ASSERT(sw_index < sfc_sa2shared(sa)->rxq_count);

	rxq_info = &sfc_sa2shared(sa)->rxq_info[sw_index];
	SFC_ASSERT(rxq_info->state == SFC_RXQ_INITIALIZED);

	rxq = &sa->rxq_ctrl[sw_index];
	evq = rxq->evq;

	rc = sfc_ev_qstart(evq, sfc_evq_index_by_rxq_sw_index(sa, sw_index));
	if (rc != 0)
		goto fail_ev_qstart;

	switch (rxq_info->type) {
	case EFX_RXQ_TYPE_DEFAULT:
		rc = efx_rx_qcreate(sa->nic, rxq->hw_index, 0, rxq_info->type,
			&rxq->mem, rxq_info->entries, 0 /* not used on EF10 */,
			rxq_info->type_flags, evq->common, &rxq->common);
		break;
	case EFX_RXQ_TYPE_ES_SUPER_BUFFER: {
		struct rte_mempool *mp = rxq_info->refill_mb_pool;
		struct rte_mempool_info mp_info;

		rc = rte_mempool_ops_get_info(mp, &mp_info);
		if (rc != 0) {
			/* Positive errno is used in the driver */
			rc = -rc;
			goto fail_mp_get_info;
		}
		if (mp_info.contig_block_size <= 0) {
			rc = EINVAL;
			goto fail_bad_contig_block_size;
		}
		rc = efx_rx_qcreate_es_super_buffer(sa->nic, rxq->hw_index, 0,
			mp_info.contig_block_size, rxq->buf_size,
			mp->header_size + mp->elt_size + mp->trailer_size,
			sa->rxd_wait_timeout_ns,
			&rxq->mem, rxq_info->entries, rxq_info->type_flags,
			evq->common, &rxq->common);
		break;
	}
	default:
		rc = ENOTSUP;
	}
	if (rc != 0)
		goto fail_rx_qcreate;

	efx_rx_qenable(rxq->common);

	rc = sa->priv.dp_rx->qstart(rxq_info->dp, evq->read_ptr);
	if (rc != 0)
		goto fail_dp_qstart;

	rxq_info->state |= SFC_RXQ_STARTED;

	if (sw_index == 0 && !sfc_sa2shared(sa)->isolated) {
		rc = sfc_rx_default_rxq_set_filter(sa, rxq);
		if (rc != 0)
			goto fail_mac_filter_default_rxq_set;
	}

	/* It seems to be used by DPDK for debug purposes only ('rte_ether') */
	sa->eth_dev->data->rx_queue_state[sw_index] =
		RTE_ETH_QUEUE_STATE_STARTED;

	return 0;

fail_mac_filter_default_rxq_set:
	sa->priv.dp_rx->qstop(rxq_info->dp, &rxq->evq->read_ptr);

fail_dp_qstart:
	sfc_rx_qflush(sa, sw_index);

fail_rx_qcreate:
fail_bad_contig_block_size:
fail_mp_get_info:
	sfc_ev_qstop(evq);

fail_ev_qstart:
	return rc;
}
void
sfc_rx_qstop(struct sfc_adapter *sa, unsigned int sw_index)
{
	struct sfc_rxq_info *rxq_info;
	struct sfc_rxq *rxq;

	sfc_log_init(sa, "sw_index=%u", sw_index);

	SFC_ASSERT(sw_index < sfc_sa2shared(sa)->rxq_count);

	rxq_info = &sfc_sa2shared(sa)->rxq_info[sw_index];

	if (rxq_info->state == SFC_RXQ_INITIALIZED)
		return;
	SFC_ASSERT(rxq_info->state & SFC_RXQ_STARTED);

	/* It seems to be used by DPDK for debug purposes only ('rte_ether') */
	sa->eth_dev->data->rx_queue_state[sw_index] =
		RTE_ETH_QUEUE_STATE_STOPPED;

	rxq = &sa->rxq_ctrl[sw_index];
	sa->priv.dp_rx->qstop(rxq_info->dp, &rxq->evq->read_ptr);

	if (sw_index == 0)
		efx_mac_filter_default_rxq_clear(sa->nic);

	sfc_rx_qflush(sa, sw_index);

	rxq_info->state = SFC_RXQ_INITIALIZED;

	efx_rx_qdestroy(rxq->common);

	sfc_ev_qstop(rxq->evq);
}
uint64_t
sfc_rx_get_dev_offload_caps(struct sfc_adapter *sa)
{
	const efx_nic_cfg_t *encp = efx_nic_cfg_get(sa->nic);
	uint64_t caps = 0;

	caps |= DEV_RX_OFFLOAD_JUMBO_FRAME;

	if (sa->priv.dp_rx->features & SFC_DP_RX_FEAT_CHECKSUM) {
		caps |= DEV_RX_OFFLOAD_IPV4_CKSUM;
		caps |= DEV_RX_OFFLOAD_UDP_CKSUM;
		caps |= DEV_RX_OFFLOAD_TCP_CKSUM;
	}

	if (encp->enc_tunnel_encapsulations_supported &&
	    (sa->priv.dp_rx->features & SFC_DP_RX_FEAT_TUNNELS))
		caps |= DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM;

	return caps;
}
uint64_t
sfc_rx_get_queue_offload_caps(struct sfc_adapter *sa)
{
	uint64_t caps = 0;

	if (sa->priv.dp_rx->features & SFC_DP_RX_FEAT_SCATTER)
		caps |= DEV_RX_OFFLOAD_SCATTER;

	return caps;
}
static int
sfc_rx_qcheck_conf(struct sfc_adapter *sa, unsigned int rxq_max_fill_level,
		   const struct rte_eth_rxconf *rx_conf,
		   __rte_unused uint64_t offloads)
{
	int rc = 0;

	if (rx_conf->rx_thresh.pthresh != 0 ||
	    rx_conf->rx_thresh.hthresh != 0 ||
	    rx_conf->rx_thresh.wthresh != 0) {
		sfc_warn(sa,
			"RxQ prefetch/host/writeback thresholds are not supported");
	}

	if (rx_conf->rx_free_thresh > rxq_max_fill_level) {
		sfc_err(sa,
			"RxQ free threshold too large: %u vs maximum %u",
			rx_conf->rx_free_thresh, rxq_max_fill_level);
		rc = EINVAL;
	}

	if (rx_conf->rx_drop_en == 0) {
		sfc_err(sa, "RxQ drop disable is not supported");
		rc = EINVAL;
	}

	return rc;
}
static unsigned int
sfc_rx_mbuf_data_alignment(struct rte_mempool *mb_pool)
{
	uint32_t data_off;
	uint32_t order;

	/* The mbuf object itself is always cache line aligned */
	order = rte_bsf32(RTE_CACHE_LINE_SIZE);

	/* Data offset from mbuf object start */
	data_off = sizeof(struct rte_mbuf) + rte_pktmbuf_priv_size(mb_pool) +
		RTE_PKTMBUF_HEADROOM;

	order = MIN(order, rte_bsf32(data_off));

	return 1u << order;
}
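
/*
 * Compute the usable Rx buffer size for the given mbuf pool taking
 * headroom and NIC buffer start/end alignment requirements into
 * account; returns 0 if the pool objects are too small.
 */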
uint16_t
sfc_rx_mb_pool_buf_size(struct sfc_adapter *sa, struct rte_mempool *mb_pool)
{
	const efx_nic_cfg_t *encp = efx_nic_cfg_get(sa->nic);
	const uint32_t nic_align_start = MAX(1, encp->enc_rx_buf_align_start);
	const uint32_t nic_align_end = MAX(1, encp->enc_rx_buf_align_end);
	uint16_t buf_size;
	unsigned int buf_aligned;
	unsigned int start_alignment;
	unsigned int end_padding_alignment;

	/* Below it is assumed that both alignments are power of 2 */
	SFC_ASSERT(rte_is_power_of_2(nic_align_start));
	SFC_ASSERT(rte_is_power_of_2(nic_align_end));

	/*
	 * mbuf is always cache line aligned, double-check
	 * that it meets rx buffer start alignment requirements.
	 */

	/* Start from mbuf pool data room size */
	buf_size = rte_pktmbuf_data_room_size(mb_pool);

	/* Remove headroom */
	if (buf_size <= RTE_PKTMBUF_HEADROOM) {
		sfc_err(sa,
			"RxQ mbuf pool %s object data room size %u is smaller than headroom %u",
			mb_pool->name, buf_size, RTE_PKTMBUF_HEADROOM);
		return 0;
	}
	buf_size -= RTE_PKTMBUF_HEADROOM;

	/* Calculate guaranteed data start alignment */
	buf_aligned = sfc_rx_mbuf_data_alignment(mb_pool);

	/* Reserve space for start alignment */
	if (buf_aligned < nic_align_start) {
		start_alignment = nic_align_start - buf_aligned;
		if (buf_size <= start_alignment) {
			sfc_err(sa,
				"RxQ mbuf pool %s object data room size %u is insufficient for headroom %u and buffer start alignment %u required by NIC",
				mb_pool->name,
				rte_pktmbuf_data_room_size(mb_pool),
				RTE_PKTMBUF_HEADROOM, start_alignment);
			return 0;
		}
		buf_aligned = nic_align_start;
		buf_size -= start_alignment;
	} else {
		start_alignment = 0;
	}

	/* Make sure that end padding does not write beyond the buffer */
	if (buf_aligned < nic_align_end) {
		/*
		 * Estimate space which can be lost. If guaranteed buffer
		 * size is odd, lost space is (nic_align_end - 1). More
		 * accurate formula is below.
		 */
		end_padding_alignment = nic_align_end -
			MIN(buf_aligned, 1u << (rte_bsf32(buf_size) - 1));
		if (buf_size <= end_padding_alignment) {
			sfc_err(sa,
				"RxQ mbuf pool %s object data room size %u is insufficient for headroom %u, buffer start alignment %u and end padding alignment %u required by NIC",
				mb_pool->name,
				rte_pktmbuf_data_room_size(mb_pool),
				RTE_PKTMBUF_HEADROOM, start_alignment,
				end_padding_alignment);
			return 0;
		}
		buf_size -= end_padding_alignment;
	} else {
		/*
		 * Start is aligned the same or better than end,
		 * just align length.
		 */
		buf_size = P2ALIGN(buf_size, nic_align_end);
	}

	return buf_size;
}
int
sfc_rx_qinit(struct sfc_adapter *sa, unsigned int sw_index,
	     uint16_t nb_rx_desc, unsigned int socket_id,
	     const struct rte_eth_rxconf *rx_conf,
	     struct rte_mempool *mb_pool)
{
	const efx_nic_cfg_t *encp = efx_nic_cfg_get(sa->nic);
	struct sfc_rss *rss = &sfc_sa2shared(sa)->rss;
	int rc;
	unsigned int rxq_entries;
	unsigned int evq_entries;
	unsigned int rxq_max_fill_level;
	uint64_t offloads;
	uint16_t buf_size;
	struct sfc_rxq_info *rxq_info;
	struct sfc_evq *evq;
	struct sfc_rxq *rxq;
	struct sfc_dp_rx_qcreate_info info;
	struct sfc_dp_rx_hw_limits hw_limits;
	uint16_t rx_free_thresh;
	const char *error;

	memset(&hw_limits, 0, sizeof(hw_limits));
	hw_limits.rxq_max_entries = sa->rxq_max_entries;
	hw_limits.rxq_min_entries = sa->rxq_min_entries;
	hw_limits.evq_max_entries = sa->evq_max_entries;
	hw_limits.evq_min_entries = sa->evq_min_entries;

	rc = sa->priv.dp_rx->qsize_up_rings(nb_rx_desc, &hw_limits, mb_pool,
					    &rxq_entries, &evq_entries,
					    &rxq_max_fill_level);
	if (rc != 0)
		goto fail_size_up_rings;
	SFC_ASSERT(rxq_entries >= sa->rxq_min_entries);
	SFC_ASSERT(rxq_entries <= sa->rxq_max_entries);
	SFC_ASSERT(rxq_max_fill_level <= nb_rx_desc);

	offloads = rx_conf->offloads |
		sa->eth_dev->data->dev_conf.rxmode.offloads;
	rc = sfc_rx_qcheck_conf(sa, rxq_max_fill_level, rx_conf, offloads);
	if (rc != 0)
		goto fail_bad_conf;

	buf_size = sfc_rx_mb_pool_buf_size(sa, mb_pool);
	if (buf_size == 0) {
		sfc_err(sa, "RxQ %u mbuf pool object size is too small",
			sw_index);
		rc = EINVAL;
		goto fail_bad_conf;
	}

	if (!sfc_rx_check_scatter(sa->port.pdu, buf_size,
				  encp->enc_rx_prefix_size,
				  (offloads & DEV_RX_OFFLOAD_SCATTER),
				  &error)) {
		sfc_err(sa, "RxQ %u MTU check failed: %s", sw_index, error);
		sfc_err(sa, "RxQ %u calculated Rx buffer size is %u vs "
			"PDU size %u plus Rx prefix %u bytes",
			sw_index, buf_size, (unsigned int)sa->port.pdu,
			encp->enc_rx_prefix_size);
		rc = EINVAL;
		goto fail_bad_conf;
	}

	SFC_ASSERT(sw_index < sfc_sa2shared(sa)->rxq_count);
	rxq_info = &sfc_sa2shared(sa)->rxq_info[sw_index];

	SFC_ASSERT(rxq_entries <= rxq_info->max_entries);
	rxq_info->entries = rxq_entries;

	if (sa->priv.dp_rx->dp.hw_fw_caps & SFC_DP_HW_FW_CAP_RX_ES_SUPER_BUFFER)
		rxq_info->type = EFX_RXQ_TYPE_ES_SUPER_BUFFER;
	else
		rxq_info->type = EFX_RXQ_TYPE_DEFAULT;

	rxq_info->type_flags =
		(offloads & DEV_RX_OFFLOAD_SCATTER) ?
		EFX_RXQ_FLAG_SCATTER : EFX_RXQ_FLAG_NONE;

	if ((encp->enc_tunnel_encapsulations_supported != 0) &&
	    (sa->priv.dp_rx->features & SFC_DP_RX_FEAT_TUNNELS))
		rxq_info->type_flags |= EFX_RXQ_FLAG_INNER_CLASSES;

	rc = sfc_ev_qinit(sa, SFC_EVQ_TYPE_RX, sw_index,
			  evq_entries, socket_id, &evq);
	if (rc != 0)
		goto fail_ev_qinit;

	rxq = &sa->rxq_ctrl[sw_index];
	rxq->evq = evq;
	rxq->hw_index = sw_index;
	/*
	 * If Rx refill threshold is specified (its value is non zero) in
	 * Rx configuration, use specified value. Otherwise use 1/8 of
	 * the Rx descriptors number as the default. It allows to keep
	 * Rx ring full-enough and does not refill too aggressive if
	 * packet rate is high.
	 *
	 * Since PMD refills in bulks waiting for full bulk may be
	 * refilled (basically round down), it is better to round up
	 * here to mitigate it a bit.
	 */
	rx_free_thresh = (rx_conf->rx_free_thresh != 0) ?
		rx_conf->rx_free_thresh : EFX_DIV_ROUND_UP(nb_rx_desc, 8);
	/* Rx refill threshold cannot be smaller than refill bulk */
	rxq_info->refill_threshold =
		RTE_MAX(rx_free_thresh, SFC_RX_REFILL_BULK);
	rxq_info->refill_mb_pool = mb_pool;
	rxq->buf_size = buf_size;

	rc = sfc_dma_alloc(sa, "rxq", sw_index,
			   efx_rxq_size(sa->nic, rxq_info->entries),
			   socket_id, &rxq->mem);
	if (rc != 0)
		goto fail_dma_alloc;

	memset(&info, 0, sizeof(info));
	info.refill_mb_pool = rxq_info->refill_mb_pool;
	info.max_fill_level = rxq_max_fill_level;
	info.refill_threshold = rxq_info->refill_threshold;
	info.buf_size = buf_size;
	info.batch_max = encp->enc_rx_batch_max;
	info.prefix_size = encp->enc_rx_prefix_size;

	if (rss->hash_support == EFX_RX_HASH_AVAILABLE && rss->channels > 0)
		info.flags |= SFC_RXQ_FLAG_RSS_HASH;

	info.rxq_entries = rxq_info->entries;
	info.rxq_hw_ring = rxq->mem.esm_base;
	info.evq_entries = evq_entries;
	info.evq_hw_ring = evq->mem.esm_base;
	info.hw_index = rxq->hw_index;
	info.mem_bar = sa->mem_bar.esb_base;
	info.vi_window_shift = encp->enc_vi_window_shift;

	rc = sa->priv.dp_rx->qcreate(sa->eth_dev->data->port_id, sw_index,
				     &RTE_ETH_DEV_TO_PCI(sa->eth_dev)->addr,
				     socket_id, &info, &rxq_info->dp);
	if (rc != 0)
		goto fail_dp_rx_qcreate;

	evq->dp_rxq = rxq_info->dp;

	rxq_info->state = SFC_RXQ_INITIALIZED;

	rxq_info->deferred_start = (rx_conf->rx_deferred_start != 0);

	return 0;

fail_dp_rx_qcreate:
	sfc_dma_free(sa, &rxq->mem);

fail_dma_alloc:
	sfc_ev_qfini(evq);

fail_ev_qinit:
	rxq_info->entries = 0;

fail_bad_conf:
fail_size_up_rings:
	sfc_log_init(sa, "failed %d", rc);
	return rc;
}
void
sfc_rx_qfini(struct sfc_adapter *sa, unsigned int sw_index)
{
	struct sfc_rxq_info *rxq_info;
	struct sfc_rxq *rxq;

	SFC_ASSERT(sw_index < sfc_sa2shared(sa)->rxq_count);
	sa->eth_dev->data->rx_queues[sw_index] = NULL;

	rxq_info = &sfc_sa2shared(sa)->rxq_info[sw_index];

	SFC_ASSERT(rxq_info->state == SFC_RXQ_INITIALIZED);

	sa->priv.dp_rx->qdestroy(rxq_info->dp);
	rxq_info->dp = NULL;

	rxq_info->state &= ~SFC_RXQ_INITIALIZED;
	rxq_info->entries = 0;

	rxq = &sa->rxq_ctrl[sw_index];

	sfc_dma_free(sa, &rxq->mem);

	sfc_ev_qfini(rxq->evq);
	rxq->evq = NULL;
}
/*
 * Mapping between RTE RSS hash functions and their EFX counterparts.
 */
static const struct sfc_rss_hf_rte_to_efx sfc_rss_hf_map[] = {
	{ ETH_RSS_NONFRAG_IPV4_TCP,
	  EFX_RX_HASH(IPV4_TCP, 4TUPLE) },
	{ ETH_RSS_NONFRAG_IPV4_UDP,
	  EFX_RX_HASH(IPV4_UDP, 4TUPLE) },
	{ ETH_RSS_NONFRAG_IPV6_TCP | ETH_RSS_IPV6_TCP_EX,
	  EFX_RX_HASH(IPV6_TCP, 4TUPLE) },
	{ ETH_RSS_NONFRAG_IPV6_UDP | ETH_RSS_IPV6_UDP_EX,
	  EFX_RX_HASH(IPV6_UDP, 4TUPLE) },
	{ ETH_RSS_IPV4 | ETH_RSS_FRAG_IPV4 | ETH_RSS_NONFRAG_IPV4_OTHER,
	  EFX_RX_HASH(IPV4_TCP, 2TUPLE) | EFX_RX_HASH(IPV4_UDP, 2TUPLE) |
	  EFX_RX_HASH(IPV4, 2TUPLE) },
	{ ETH_RSS_IPV6 | ETH_RSS_FRAG_IPV6 | ETH_RSS_NONFRAG_IPV6_OTHER |
	  ETH_RSS_IPV6_EX,
	  EFX_RX_HASH(IPV6_TCP, 2TUPLE) | EFX_RX_HASH(IPV6_UDP, 2TUPLE) |
	  EFX_RX_HASH(IPV6, 2TUPLE) }
};
static efx_rx_hash_type_t
sfc_rx_hash_types_mask_supp(efx_rx_hash_type_t hash_type,
			    unsigned int *hash_type_flags_supported,
			    unsigned int nb_hash_type_flags_supported)
{
	efx_rx_hash_type_t hash_type_masked = 0;
	unsigned int i, j;

	for (i = 0; i < nb_hash_type_flags_supported; ++i) {
		unsigned int class_tuple_lbn[] = {
			EFX_RX_CLASS_IPV4_TCP_LBN,
			EFX_RX_CLASS_IPV4_UDP_LBN,
			EFX_RX_CLASS_IPV4_LBN,
			EFX_RX_CLASS_IPV6_TCP_LBN,
			EFX_RX_CLASS_IPV6_UDP_LBN,
			EFX_RX_CLASS_IPV6_LBN
		};

		for (j = 0; j < RTE_DIM(class_tuple_lbn); ++j) {
			unsigned int tuple_mask = EFX_RX_CLASS_HASH_4TUPLE;
			unsigned int flag;

			tuple_mask <<= class_tuple_lbn[j];
			flag = hash_type & tuple_mask;

			if (flag == hash_type_flags_supported[i])
				hash_type_masked |= flag;
		}
	}

	return hash_type_masked;
}
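
/*
 * Select an RSS hash algorithm supported by the NIC and build the
 * RTE-to-EFX hash function map restricted to the hash type flags the
 * hardware actually supports.
 */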
static int
sfc_rx_hash_init(struct sfc_adapter *sa)
{
	struct sfc_rss *rss = &sfc_sa2shared(sa)->rss;
	const efx_nic_cfg_t *encp = efx_nic_cfg_get(sa->nic);
	uint32_t alg_mask = encp->enc_rx_scale_hash_alg_mask;
	efx_rx_hash_alg_t alg;
	unsigned int flags_supp[EFX_RX_HASH_NFLAGS];
	unsigned int nb_flags_supp;
	struct sfc_rss_hf_rte_to_efx *hf_map;
	struct sfc_rss_hf_rte_to_efx *entry;
	efx_rx_hash_type_t efx_hash_types;
	unsigned int i;
	int rc;

	if (alg_mask & (1U << EFX_RX_HASHALG_TOEPLITZ))
		alg = EFX_RX_HASHALG_TOEPLITZ;
	else if (alg_mask & (1U << EFX_RX_HASHALG_PACKED_STREAM))
		alg = EFX_RX_HASHALG_PACKED_STREAM;
	else
		return EINVAL;

	rc = efx_rx_scale_hash_flags_get(sa->nic, alg, flags_supp,
					 RTE_DIM(flags_supp), &nb_flags_supp);
	if (rc != 0)
		return rc;

	hf_map = rte_calloc_socket("sfc-rss-hf-map",
				   RTE_DIM(sfc_rss_hf_map),
				   sizeof(*hf_map), 0, sa->socket_id);
	if (hf_map == NULL)
		return ENOMEM;

	entry = hf_map;
	efx_hash_types = 0;
	for (i = 0; i < RTE_DIM(sfc_rss_hf_map); ++i) {
		efx_rx_hash_type_t ht;

		ht = sfc_rx_hash_types_mask_supp(sfc_rss_hf_map[i].efx,
						 flags_supp, nb_flags_supp);
		if (ht != 0) {
			entry->rte = sfc_rss_hf_map[i].rte;
			entry->efx = ht;
			efx_hash_types |= ht;
			++entry;
		}
	}

	rss->hash_alg = alg;
	rss->hf_map_nb_entries = (unsigned int)(entry - hf_map);
	rss->hf_map = hf_map;
	rss->hash_types = efx_hash_types;

	return 0;
}
static void
sfc_rx_hash_fini(struct sfc_adapter *sa)
{
	struct sfc_rss *rss = &sfc_sa2shared(sa)->rss;

	rte_free(rss->hf_map);
}
int
sfc_rx_hf_rte_to_efx(struct sfc_adapter *sa, uint64_t rte,
		     efx_rx_hash_type_t *efx)
{
	struct sfc_rss *rss = &sfc_sa2shared(sa)->rss;
	efx_rx_hash_type_t hash_types = 0;
	unsigned int i;

	for (i = 0; i < rss->hf_map_nb_entries; ++i) {
		uint64_t rte_mask = rss->hf_map[i].rte;

		if ((rte & rte_mask) != 0) {
			rte &= ~rte_mask;
			hash_types |= rss->hf_map[i].efx;
		}
	}

	if (rte != 0) {
		sfc_err(sa, "unsupported hash functions requested");
		return EINVAL;
	}

	*efx = hash_types;

	return 0;
}
uint64_t
sfc_rx_hf_efx_to_rte(struct sfc_rss *rss, efx_rx_hash_type_t efx)
{
	uint64_t rte = 0;
	unsigned int i;

	for (i = 0; i < rss->hf_map_nb_entries; ++i) {
		efx_rx_hash_type_t hash_type = rss->hf_map[i].efx;

		if ((efx & hash_type) == hash_type)
			rte |= rss->hf_map[i].rte;
	}

	return rte;
}
static int
sfc_rx_process_adv_conf_rss(struct sfc_adapter *sa,
			    struct rte_eth_rss_conf *conf)
{
	struct sfc_rss *rss = &sfc_sa2shared(sa)->rss;
	efx_rx_hash_type_t efx_hash_types = rss->hash_types;
	uint64_t rss_hf = sfc_rx_hf_efx_to_rte(rss, efx_hash_types);
	int rc;

	if (rss->context_type != EFX_RX_SCALE_EXCLUSIVE) {
		if ((conf->rss_hf != 0 && conf->rss_hf != rss_hf) ||
		    conf->rss_key != NULL)
			return EINVAL;
	}

	if (conf->rss_hf != 0) {
		rc = sfc_rx_hf_rte_to_efx(sa, conf->rss_hf, &efx_hash_types);
		if (rc != 0)
			return rc;
	}

	if (conf->rss_key != NULL) {
		if (conf->rss_key_len != sizeof(rss->key)) {
			sfc_err(sa, "RSS key size is wrong (should be %lu)",
				sizeof(rss->key));
			return EINVAL;
		}
		rte_memcpy(rss->key, conf->rss_key, sizeof(rss->key));
	}

	rss->hash_types = efx_hash_types;

	return 0;
}
static int
sfc_rx_rss_config(struct sfc_adapter *sa)
{
	struct sfc_rss *rss = &sfc_sa2shared(sa)->rss;
	int rc = 0;

	if (rss->channels > 0) {
		rc = efx_rx_scale_mode_set(sa->nic, EFX_RSS_CONTEXT_DEFAULT,
					   rss->hash_alg, rss->hash_types,
					   B_TRUE);
		if (rc != 0)
			goto finish;

		rc = efx_rx_scale_key_set(sa->nic, EFX_RSS_CONTEXT_DEFAULT,
					  rss->key, sizeof(rss->key));
		if (rc != 0)
			goto finish;

		rc = efx_rx_scale_tbl_set(sa->nic, EFX_RSS_CONTEXT_DEFAULT,
					  rss->tbl, RTE_DIM(rss->tbl));
	}

finish:
	return rc;
}
int
sfc_rx_start(struct sfc_adapter *sa)
{
	struct sfc_adapter_shared * const sas = sfc_sa2shared(sa);
	unsigned int sw_index;
	int rc;

	sfc_log_init(sa, "rxq_count=%u", sas->rxq_count);

	rc = efx_rx_init(sa->nic);
	if (rc != 0)
		goto fail_rx_init;

	rc = sfc_rx_rss_config(sa);
	if (rc != 0)
		goto fail_rss_config;

	for (sw_index = 0; sw_index < sas->rxq_count; ++sw_index) {
		if (sas->rxq_info[sw_index].state == SFC_RXQ_INITIALIZED &&
		    (!sas->rxq_info[sw_index].deferred_start ||
		     sas->rxq_info[sw_index].deferred_started)) {
			rc = sfc_rx_qstart(sa, sw_index);
			if (rc != 0)
				goto fail_rx_qstart;
		}
	}

	return 0;

fail_rx_qstart:
	while (sw_index-- > 0)
		sfc_rx_qstop(sa, sw_index);

fail_rss_config:
	efx_rx_fini(sa->nic);

fail_rx_init:
	sfc_log_init(sa, "failed %d", rc);
	return rc;
}
void
sfc_rx_stop(struct sfc_adapter *sa)
{
	struct sfc_adapter_shared * const sas = sfc_sa2shared(sa);
	unsigned int sw_index;

	sfc_log_init(sa, "rxq_count=%u", sas->rxq_count);

	sw_index = sas->rxq_count;
	while (sw_index-- > 0) {
		if (sas->rxq_info[sw_index].state & SFC_RXQ_STARTED)
			sfc_rx_qstop(sa, sw_index);
	}

	efx_rx_fini(sa->nic);
}
static int
sfc_rx_qinit_info(struct sfc_adapter *sa, unsigned int sw_index)
{
	struct sfc_adapter_shared * const sas = sfc_sa2shared(sa);
	struct sfc_rxq_info *rxq_info = &sas->rxq_info[sw_index];
	const efx_nic_cfg_t *encp = efx_nic_cfg_get(sa->nic);
	unsigned int max_entries;

	max_entries = encp->enc_rxq_max_ndescs;
	SFC_ASSERT(rte_is_power_of_2(max_entries));

	rxq_info->max_entries = max_entries;

	return 0;
}
static int
sfc_rx_check_mode(struct sfc_adapter *sa, struct rte_eth_rxmode *rxmode)
{
	struct sfc_adapter_shared * const sas = sfc_sa2shared(sa);
	uint64_t offloads_supported = sfc_rx_get_dev_offload_caps(sa) |
				      sfc_rx_get_queue_offload_caps(sa);
	struct sfc_rss *rss = &sas->rss;
	int rc = 0;

	switch (rxmode->mq_mode) {
	case ETH_MQ_RX_NONE:
		/* No special checks are required */
		break;
	case ETH_MQ_RX_RSS:
		if (rss->context_type == EFX_RX_SCALE_UNAVAILABLE) {
			sfc_err(sa, "RSS is not available");
			rc = EINVAL;
		}
		break;
	default:
		sfc_err(sa, "Rx multi-queue mode %u not supported",
			rxmode->mq_mode);
		rc = EINVAL;
	}

	/*
	 * Requested offloads are validated against supported by ethdev,
	 * so unsupported offloads cannot be added as the result of
	 * this check below.
	 */
	if ((rxmode->offloads & DEV_RX_OFFLOAD_CHECKSUM) !=
	    (offloads_supported & DEV_RX_OFFLOAD_CHECKSUM)) {
		sfc_warn(sa, "Rx checksum offloads cannot be disabled - always on (IPv4/TCP/UDP)");
		rxmode->offloads |= DEV_RX_OFFLOAD_CHECKSUM;
	}

	if ((offloads_supported & DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM) &&
	    (~rxmode->offloads & DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM)) {
		sfc_warn(sa, "Rx outer IPv4 checksum offload cannot be disabled - always on");
		rxmode->offloads |= DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM;
	}

	return rc;
}
/**
 * Destroy excess queues that are no longer needed after reconfiguration
 * or complete close.
 */
static void
sfc_rx_fini_queues(struct sfc_adapter *sa, unsigned int nb_rx_queues)
{
	struct sfc_adapter_shared * const sas = sfc_sa2shared(sa);
	int sw_index;

	SFC_ASSERT(nb_rx_queues <= sas->rxq_count);

	sw_index = sas->rxq_count;
	while (--sw_index >= (int)nb_rx_queues) {
		if (sas->rxq_info[sw_index].state & SFC_RXQ_INITIALIZED)
			sfc_rx_qfini(sa, sw_index);
	}

	sas->rxq_count = nb_rx_queues;
}
/**
 * Initialize Rx subsystem.
 *
 * Called at device (re)configuration stage when number of receive queues is
 * specified together with other device level receive configuration.
 *
 * It should be used to allocate NUMA-unaware resources.
 */
int
sfc_rx_configure(struct sfc_adapter *sa)
{
	struct sfc_adapter_shared * const sas = sfc_sa2shared(sa);
	struct sfc_rss *rss = &sas->rss;
	struct rte_eth_conf *dev_conf = &sa->eth_dev->data->dev_conf;
	const unsigned int nb_rx_queues = sa->eth_dev->data->nb_rx_queues;
	int rc;

	sfc_log_init(sa, "nb_rx_queues=%u (old %u)",
		     nb_rx_queues, sas->rxq_count);

	rc = sfc_rx_check_mode(sa, &dev_conf->rxmode);
	if (rc != 0)
		goto fail_check_mode;

	if (nb_rx_queues == sas->rxq_count)
		goto configure_rss;

	if (sas->rxq_info == NULL) {
		rc = ENOMEM;
		sas->rxq_info = rte_calloc_socket("sfc-rxqs", nb_rx_queues,
						  sizeof(sas->rxq_info[0]), 0,
						  sa->socket_id);
		if (sas->rxq_info == NULL)
			goto fail_rxqs_alloc;

		/*
		 * Allocate primary process only RxQ control from heap
		 * since it should not be shared.
		 */
		rc = ENOMEM;
		sa->rxq_ctrl = calloc(nb_rx_queues, sizeof(sa->rxq_ctrl[0]));
		if (sa->rxq_ctrl == NULL)
			goto fail_rxqs_ctrl_alloc;
	} else {
		struct sfc_rxq_info *new_rxq_info;
		struct sfc_rxq *new_rxq_ctrl;

		if (nb_rx_queues < sas->rxq_count)
			sfc_rx_fini_queues(sa, nb_rx_queues);

		rc = ENOMEM;
		new_rxq_info =
			rte_realloc(sas->rxq_info,
				    nb_rx_queues * sizeof(sas->rxq_info[0]), 0);
		if (new_rxq_info == NULL && nb_rx_queues > 0)
			goto fail_rxqs_realloc;

		rc = ENOMEM;
		new_rxq_ctrl = realloc(sa->rxq_ctrl,
				       nb_rx_queues * sizeof(sa->rxq_ctrl[0]));
		if (new_rxq_ctrl == NULL && nb_rx_queues > 0)
			goto fail_rxqs_ctrl_realloc;

		sas->rxq_info = new_rxq_info;
		sa->rxq_ctrl = new_rxq_ctrl;
		if (nb_rx_queues > sas->rxq_count) {
			memset(&sas->rxq_info[sas->rxq_count], 0,
			       (nb_rx_queues - sas->rxq_count) *
			       sizeof(sas->rxq_info[0]));
			memset(&sa->rxq_ctrl[sas->rxq_count], 0,
			       (nb_rx_queues - sas->rxq_count) *
			       sizeof(sa->rxq_ctrl[0]));
		}
	}

	while (sas->rxq_count < nb_rx_queues) {
		rc = sfc_rx_qinit_info(sa, sas->rxq_count);
		if (rc != 0)
			goto fail_rx_qinit_info;

		sas->rxq_count++;
	}

configure_rss:
	rss->channels = (dev_conf->rxmode.mq_mode == ETH_MQ_RX_RSS) ?
			MIN(sas->rxq_count, EFX_MAXRSS) : 0;

	if (rss->channels > 0) {
		struct rte_eth_rss_conf *adv_conf_rss;
		unsigned int sw_index;

		for (sw_index = 0; sw_index < EFX_RSS_TBL_SIZE; ++sw_index)
			rss->tbl[sw_index] = sw_index % rss->channels;

		adv_conf_rss = &dev_conf->rx_adv_conf.rss_conf;
		rc = sfc_rx_process_adv_conf_rss(sa, adv_conf_rss);
		if (rc != 0)
			goto fail_rx_process_adv_conf_rss;
	}

	return 0;

fail_rx_process_adv_conf_rss:
fail_rx_qinit_info:
fail_rxqs_ctrl_realloc:
fail_rxqs_realloc:
fail_rxqs_ctrl_alloc:
fail_rxqs_alloc:
	sfc_rx_close(sa);

fail_check_mode:
	sfc_log_init(sa, "failed %d", rc);
	return rc;
}
/**
 * Shutdown Rx subsystem.
 *
 * Called at device close stage, for example, before device shutdown.
 */
void
sfc_rx_close(struct sfc_adapter *sa)
{
	struct sfc_rss *rss = &sfc_sa2shared(sa)->rss;

	sfc_rx_fini_queues(sa, 0);

	rss->channels = 0;

	free(sa->rxq_ctrl);
	sa->rxq_ctrl = NULL;

	rte_free(sfc_sa2shared(sa)->rxq_info);
	sfc_sa2shared(sa)->rxq_info = NULL;
}