/* SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 2016-2018 Solarflare Communications Inc.
 * All rights reserved.
 *
 * This software was jointly developed between OKTET Labs (under contract
 * for Solarflare) and Solarflare Communications, Inc.
 */
#include <string.h>
#include <strings.h>
#include <errno.h>

#include <rte_debug.h>
#include <rte_cycles.h>
#include <rte_alarm.h>
#include <rte_branch_prediction.h>

#include "efx.h"

#include "sfc.h"
#include "sfc_debug.h"
#include "sfc_log.h"
#include "sfc_ev.h"
#include "sfc_rx.h"
#include "sfc_tx.h"
#include "sfc_kvargs.h"
26 /* Initial delay when waiting for event queue init complete event */
27 #define SFC_EVQ_INIT_BACKOFF_START_US (1)
28 /* Maximum delay between event queue polling attempts */
29 #define SFC_EVQ_INIT_BACKOFF_MAX_US (10 * 1000)
30 /* Event queue init approx timeout */
31 #define SFC_EVQ_INIT_TIMEOUT_US (2 * US_PER_S)
33 /* Management event queue polling period in microseconds */
34 #define SFC_MGMT_EV_QPOLL_PERIOD_US (US_PER_S)
37 sfc_evq_type2str(enum sfc_evq_type type
)
40 case SFC_EVQ_TYPE_MGMT
:
53 sfc_ev_initialized(void *arg
)
55 struct sfc_evq
*evq
= arg
;
57 /* Init done events may be duplicated on SFN7xxx (SFC bug 31631) */
58 SFC_ASSERT(evq
->init_state
== SFC_EVQ_STARTING
||
59 evq
->init_state
== SFC_EVQ_STARTED
);
61 evq
->init_state
= SFC_EVQ_STARTED
;
67 sfc_ev_nop_rx(void *arg
, uint32_t label
, uint32_t id
,
68 uint32_t size
, uint16_t flags
)
70 struct sfc_evq
*evq
= arg
;
73 "EVQ %u unexpected Rx event label=%u id=%#x size=%u flags=%#x",
74 evq
->evq_index
, label
, id
, size
, flags
);
79 sfc_ev_efx_rx(void *arg
, __rte_unused
uint32_t label
, uint32_t id
,
80 uint32_t size
, uint16_t flags
)
82 struct sfc_evq
*evq
= arg
;
83 struct sfc_efx_rxq
*rxq
;
85 unsigned int pending_id
;
88 struct sfc_efx_rx_sw_desc
*rxd
;
90 if (unlikely(evq
->exception
))
93 rxq
= sfc_efx_rxq_by_dp_rxq(evq
->dp_rxq
);
95 SFC_ASSERT(rxq
!= NULL
);
96 SFC_ASSERT(rxq
->evq
== evq
);
97 SFC_ASSERT(rxq
->flags
& SFC_EFX_RXQ_FLAG_STARTED
);
99 stop
= (id
+ 1) & rxq
->ptr_mask
;
100 pending_id
= rxq
->pending
& rxq
->ptr_mask
;
101 delta
= (stop
>= pending_id
) ? (stop
- pending_id
) :
102 (rxq
->ptr_mask
+ 1 - pending_id
+ stop
);
106 * Rx event with no new descriptors done and zero length
107 * is used to abort scattered packet when there is no room
110 if (unlikely(size
!= 0)) {
111 evq
->exception
= B_TRUE
;
113 "EVQ %u RxQ %u invalid RX abort "
114 "(id=%#x size=%u flags=%#x); needs restart",
115 evq
->evq_index
, rxq
->dp
.dpq
.queue_id
,
120 /* Add discard flag to the first fragment */
121 rxq
->sw_desc
[pending_id
].flags
|= EFX_DISCARD
;
122 /* Remove continue flag from the last fragment */
123 rxq
->sw_desc
[id
].flags
&= ~EFX_PKT_CONT
;
124 } else if (unlikely(delta
> rxq
->batch_max
)) {
125 evq
->exception
= B_TRUE
;
128 "EVQ %u RxQ %u completion out of order "
129 "(id=%#x delta=%u flags=%#x); needs restart",
130 evq
->evq_index
, rxq
->dp
.dpq
.queue_id
,
136 for (i
= pending_id
; i
!= stop
; i
= (i
+ 1) & rxq
->ptr_mask
) {
137 rxd
= &rxq
->sw_desc
[i
];
141 SFC_ASSERT(size
< (1 << 16));
142 rxd
->size
= (uint16_t)size
;
145 rxq
->pending
+= delta
;
152 sfc_ev_dp_rx(void *arg
, __rte_unused
uint32_t label
, uint32_t id
,
153 __rte_unused
uint32_t size
, __rte_unused
uint16_t flags
)
155 struct sfc_evq
*evq
= arg
;
156 struct sfc_dp_rxq
*dp_rxq
;
158 dp_rxq
= evq
->dp_rxq
;
159 SFC_ASSERT(dp_rxq
!= NULL
);
161 SFC_ASSERT(evq
->sa
->dp_rx
->qrx_ev
!= NULL
);
162 return evq
->sa
->dp_rx
->qrx_ev(dp_rxq
, id
);
166 sfc_ev_nop_rx_ps(void *arg
, uint32_t label
, uint32_t id
,
167 uint32_t pkt_count
, uint16_t flags
)
169 struct sfc_evq
*evq
= arg
;
172 "EVQ %u unexpected packed stream Rx event label=%u id=%#x pkt_count=%u flags=%#x",
173 evq
->evq_index
, label
, id
, pkt_count
, flags
);
177 /* It is not actually used on datapath, but required on RxQ flush */
179 sfc_ev_dp_rx_ps(void *arg
, __rte_unused
uint32_t label
, uint32_t id
,
180 __rte_unused
uint32_t pkt_count
, __rte_unused
uint16_t flags
)
182 struct sfc_evq
*evq
= arg
;
183 struct sfc_dp_rxq
*dp_rxq
;
185 dp_rxq
= evq
->dp_rxq
;
186 SFC_ASSERT(dp_rxq
!= NULL
);
188 if (evq
->sa
->dp_rx
->qrx_ps_ev
!= NULL
)
189 return evq
->sa
->dp_rx
->qrx_ps_ev(dp_rxq
, id
);
195 sfc_ev_nop_tx(void *arg
, uint32_t label
, uint32_t id
)
197 struct sfc_evq
*evq
= arg
;
199 sfc_err(evq
->sa
, "EVQ %u unexpected Tx event label=%u id=%#x",
200 evq
->evq_index
, label
, id
);
205 sfc_ev_tx(void *arg
, __rte_unused
uint32_t label
, uint32_t id
)
207 struct sfc_evq
*evq
= arg
;
208 struct sfc_dp_txq
*dp_txq
;
209 struct sfc_efx_txq
*txq
;
213 dp_txq
= evq
->dp_txq
;
214 SFC_ASSERT(dp_txq
!= NULL
);
216 txq
= sfc_efx_txq_by_dp_txq(dp_txq
);
217 SFC_ASSERT(txq
->evq
== evq
);
219 if (unlikely((txq
->flags
& SFC_EFX_TXQ_FLAG_STARTED
) == 0))
222 stop
= (id
+ 1) & txq
->ptr_mask
;
223 id
= txq
->pending
& txq
->ptr_mask
;
225 delta
= (stop
>= id
) ? (stop
- id
) : (txq
->ptr_mask
+ 1 - id
+ stop
);
227 txq
->pending
+= delta
;
234 sfc_ev_dp_tx(void *arg
, __rte_unused
uint32_t label
, uint32_t id
)
236 struct sfc_evq
*evq
= arg
;
237 struct sfc_dp_txq
*dp_txq
;
239 dp_txq
= evq
->dp_txq
;
240 SFC_ASSERT(dp_txq
!= NULL
);
242 SFC_ASSERT(evq
->sa
->dp_tx
->qtx_ev
!= NULL
);
243 return evq
->sa
->dp_tx
->qtx_ev(dp_txq
, id
);
247 sfc_ev_exception(void *arg
, uint32_t code
, __rte_unused
uint32_t data
)
249 struct sfc_evq
*evq
= arg
;
251 if (code
== EFX_EXCEPTION_UNKNOWN_SENSOREVT
)
254 evq
->exception
= B_TRUE
;
256 "hardware exception %s (code=%u, data=%#x) on EVQ %u;"
258 (code
== EFX_EXCEPTION_RX_RECOVERY
) ? "RX_RECOVERY" :
259 (code
== EFX_EXCEPTION_RX_DSC_ERROR
) ? "RX_DSC_ERROR" :
260 (code
== EFX_EXCEPTION_TX_DSC_ERROR
) ? "TX_DSC_ERROR" :
261 (code
== EFX_EXCEPTION_FWALERT_SRAM
) ? "FWALERT_SRAM" :
262 (code
== EFX_EXCEPTION_UNKNOWN_FWALERT
) ? "UNKNOWN_FWALERT" :
263 (code
== EFX_EXCEPTION_RX_ERROR
) ? "RX_ERROR" :
264 (code
== EFX_EXCEPTION_TX_ERROR
) ? "TX_ERROR" :
265 (code
== EFX_EXCEPTION_EV_ERROR
) ? "EV_ERROR" :
267 code
, data
, evq
->evq_index
);
273 sfc_ev_nop_rxq_flush_done(void *arg
, uint32_t rxq_hw_index
)
275 struct sfc_evq
*evq
= arg
;
277 sfc_err(evq
->sa
, "EVQ %u unexpected RxQ %u flush done",
278 evq
->evq_index
, rxq_hw_index
);
283 sfc_ev_rxq_flush_done(void *arg
, __rte_unused
uint32_t rxq_hw_index
)
285 struct sfc_evq
*evq
= arg
;
286 struct sfc_dp_rxq
*dp_rxq
;
289 dp_rxq
= evq
->dp_rxq
;
290 SFC_ASSERT(dp_rxq
!= NULL
);
292 rxq
= sfc_rxq_by_dp_rxq(dp_rxq
);
293 SFC_ASSERT(rxq
!= NULL
);
294 SFC_ASSERT(rxq
->hw_index
== rxq_hw_index
);
295 SFC_ASSERT(rxq
->evq
== evq
);
296 sfc_rx_qflush_done(rxq
);
302 sfc_ev_nop_rxq_flush_failed(void *arg
, uint32_t rxq_hw_index
)
304 struct sfc_evq
*evq
= arg
;
306 sfc_err(evq
->sa
, "EVQ %u unexpected RxQ %u flush failed",
307 evq
->evq_index
, rxq_hw_index
);
312 sfc_ev_rxq_flush_failed(void *arg
, __rte_unused
uint32_t rxq_hw_index
)
314 struct sfc_evq
*evq
= arg
;
315 struct sfc_dp_rxq
*dp_rxq
;
318 dp_rxq
= evq
->dp_rxq
;
319 SFC_ASSERT(dp_rxq
!= NULL
);
321 rxq
= sfc_rxq_by_dp_rxq(dp_rxq
);
322 SFC_ASSERT(rxq
!= NULL
);
323 SFC_ASSERT(rxq
->hw_index
== rxq_hw_index
);
324 SFC_ASSERT(rxq
->evq
== evq
);
325 sfc_rx_qflush_failed(rxq
);
331 sfc_ev_nop_txq_flush_done(void *arg
, uint32_t txq_hw_index
)
333 struct sfc_evq
*evq
= arg
;
335 sfc_err(evq
->sa
, "EVQ %u unexpected TxQ %u flush done",
336 evq
->evq_index
, txq_hw_index
);
341 sfc_ev_txq_flush_done(void *arg
, __rte_unused
uint32_t txq_hw_index
)
343 struct sfc_evq
*evq
= arg
;
344 struct sfc_dp_txq
*dp_txq
;
347 dp_txq
= evq
->dp_txq
;
348 SFC_ASSERT(dp_txq
!= NULL
);
350 txq
= sfc_txq_by_dp_txq(dp_txq
);
351 SFC_ASSERT(txq
!= NULL
);
352 SFC_ASSERT(txq
->hw_index
== txq_hw_index
);
353 SFC_ASSERT(txq
->evq
== evq
);
354 sfc_tx_qflush_done(txq
);
360 sfc_ev_software(void *arg
, uint16_t magic
)
362 struct sfc_evq
*evq
= arg
;
364 sfc_err(evq
->sa
, "EVQ %u unexpected software event magic=%#.4x",
365 evq
->evq_index
, magic
);
370 sfc_ev_sram(void *arg
, uint32_t code
)
372 struct sfc_evq
*evq
= arg
;
374 sfc_err(evq
->sa
, "EVQ %u unexpected SRAM event code=%u",
375 evq
->evq_index
, code
);
380 sfc_ev_wake_up(void *arg
, uint32_t index
)
382 struct sfc_evq
*evq
= arg
;
384 sfc_err(evq
->sa
, "EVQ %u unexpected wake up event index=%u",
385 evq
->evq_index
, index
);
390 sfc_ev_timer(void *arg
, uint32_t index
)
392 struct sfc_evq
*evq
= arg
;
394 sfc_err(evq
->sa
, "EVQ %u unexpected timer event index=%u",
395 evq
->evq_index
, index
);
400 sfc_ev_nop_link_change(void *arg
, __rte_unused efx_link_mode_t link_mode
)
402 struct sfc_evq
*evq
= arg
;
404 sfc_err(evq
->sa
, "EVQ %u unexpected link change event",
410 sfc_ev_link_change(void *arg
, efx_link_mode_t link_mode
)
412 struct sfc_evq
*evq
= arg
;
413 struct sfc_adapter
*sa
= evq
->sa
;
414 struct rte_eth_link new_link
;
416 sfc_port_link_mode_to_info(link_mode
, &new_link
);
417 if (rte_eth_linkstatus_set(sa
->eth_dev
, &new_link
))
418 evq
->sa
->port
.lsc_seq
++;
423 static const efx_ev_callbacks_t sfc_ev_callbacks
= {
424 .eec_initialized
= sfc_ev_initialized
,
425 .eec_rx
= sfc_ev_nop_rx
,
426 .eec_rx_ps
= sfc_ev_nop_rx_ps
,
427 .eec_tx
= sfc_ev_nop_tx
,
428 .eec_exception
= sfc_ev_exception
,
429 .eec_rxq_flush_done
= sfc_ev_nop_rxq_flush_done
,
430 .eec_rxq_flush_failed
= sfc_ev_nop_rxq_flush_failed
,
431 .eec_txq_flush_done
= sfc_ev_nop_txq_flush_done
,
432 .eec_software
= sfc_ev_software
,
433 .eec_sram
= sfc_ev_sram
,
434 .eec_wake_up
= sfc_ev_wake_up
,
435 .eec_timer
= sfc_ev_timer
,
436 .eec_link_change
= sfc_ev_link_change
,
439 static const efx_ev_callbacks_t sfc_ev_callbacks_efx_rx
= {
440 .eec_initialized
= sfc_ev_initialized
,
441 .eec_rx
= sfc_ev_efx_rx
,
442 .eec_rx_ps
= sfc_ev_nop_rx_ps
,
443 .eec_tx
= sfc_ev_nop_tx
,
444 .eec_exception
= sfc_ev_exception
,
445 .eec_rxq_flush_done
= sfc_ev_rxq_flush_done
,
446 .eec_rxq_flush_failed
= sfc_ev_rxq_flush_failed
,
447 .eec_txq_flush_done
= sfc_ev_nop_txq_flush_done
,
448 .eec_software
= sfc_ev_software
,
449 .eec_sram
= sfc_ev_sram
,
450 .eec_wake_up
= sfc_ev_wake_up
,
451 .eec_timer
= sfc_ev_timer
,
452 .eec_link_change
= sfc_ev_nop_link_change
,
455 static const efx_ev_callbacks_t sfc_ev_callbacks_dp_rx
= {
456 .eec_initialized
= sfc_ev_initialized
,
457 .eec_rx
= sfc_ev_dp_rx
,
458 .eec_rx_ps
= sfc_ev_dp_rx_ps
,
459 .eec_tx
= sfc_ev_nop_tx
,
460 .eec_exception
= sfc_ev_exception
,
461 .eec_rxq_flush_done
= sfc_ev_rxq_flush_done
,
462 .eec_rxq_flush_failed
= sfc_ev_rxq_flush_failed
,
463 .eec_txq_flush_done
= sfc_ev_nop_txq_flush_done
,
464 .eec_software
= sfc_ev_software
,
465 .eec_sram
= sfc_ev_sram
,
466 .eec_wake_up
= sfc_ev_wake_up
,
467 .eec_timer
= sfc_ev_timer
,
468 .eec_link_change
= sfc_ev_nop_link_change
,
471 static const efx_ev_callbacks_t sfc_ev_callbacks_efx_tx
= {
472 .eec_initialized
= sfc_ev_initialized
,
473 .eec_rx
= sfc_ev_nop_rx
,
474 .eec_rx_ps
= sfc_ev_nop_rx_ps
,
476 .eec_exception
= sfc_ev_exception
,
477 .eec_rxq_flush_done
= sfc_ev_nop_rxq_flush_done
,
478 .eec_rxq_flush_failed
= sfc_ev_nop_rxq_flush_failed
,
479 .eec_txq_flush_done
= sfc_ev_txq_flush_done
,
480 .eec_software
= sfc_ev_software
,
481 .eec_sram
= sfc_ev_sram
,
482 .eec_wake_up
= sfc_ev_wake_up
,
483 .eec_timer
= sfc_ev_timer
,
484 .eec_link_change
= sfc_ev_nop_link_change
,
487 static const efx_ev_callbacks_t sfc_ev_callbacks_dp_tx
= {
488 .eec_initialized
= sfc_ev_initialized
,
489 .eec_rx
= sfc_ev_nop_rx
,
490 .eec_rx_ps
= sfc_ev_nop_rx_ps
,
491 .eec_tx
= sfc_ev_dp_tx
,
492 .eec_exception
= sfc_ev_exception
,
493 .eec_rxq_flush_done
= sfc_ev_nop_rxq_flush_done
,
494 .eec_rxq_flush_failed
= sfc_ev_nop_rxq_flush_failed
,
495 .eec_txq_flush_done
= sfc_ev_txq_flush_done
,
496 .eec_software
= sfc_ev_software
,
497 .eec_sram
= sfc_ev_sram
,
498 .eec_wake_up
= sfc_ev_wake_up
,
499 .eec_timer
= sfc_ev_timer
,
500 .eec_link_change
= sfc_ev_nop_link_change
,
505 sfc_ev_qpoll(struct sfc_evq
*evq
)
507 SFC_ASSERT(evq
->init_state
== SFC_EVQ_STARTED
||
508 evq
->init_state
== SFC_EVQ_STARTING
);
510 /* Synchronize the DMA memory for reading not required */
512 efx_ev_qpoll(evq
->common
, &evq
->read_ptr
, evq
->callbacks
, evq
);
514 if (unlikely(evq
->exception
) && sfc_adapter_trylock(evq
->sa
)) {
515 struct sfc_adapter
*sa
= evq
->sa
;
518 if (evq
->dp_rxq
!= NULL
) {
519 unsigned int rxq_sw_index
;
521 rxq_sw_index
= evq
->dp_rxq
->dpq
.queue_id
;
524 "restart RxQ %u because of exception on its EvQ %u",
525 rxq_sw_index
, evq
->evq_index
);
527 sfc_rx_qstop(sa
, rxq_sw_index
);
528 rc
= sfc_rx_qstart(sa
, rxq_sw_index
);
530 sfc_err(sa
, "cannot restart RxQ %u",
534 if (evq
->dp_txq
!= NULL
) {
535 unsigned int txq_sw_index
;
537 txq_sw_index
= evq
->dp_txq
->dpq
.queue_id
;
540 "restart TxQ %u because of exception on its EvQ %u",
541 txq_sw_index
, evq
->evq_index
);
543 sfc_tx_qstop(sa
, txq_sw_index
);
544 rc
= sfc_tx_qstart(sa
, txq_sw_index
);
546 sfc_err(sa
, "cannot restart TxQ %u",
551 sfc_panic(sa
, "unrecoverable exception on EvQ %u",
554 sfc_adapter_unlock(sa
);
557 /* Poll-mode driver does not re-prime the event queue for interrupts */
561 sfc_ev_mgmt_qpoll(struct sfc_adapter
*sa
)
563 if (rte_spinlock_trylock(&sa
->mgmt_evq_lock
)) {
564 if (sa
->mgmt_evq_running
)
565 sfc_ev_qpoll(sa
->mgmt_evq
);
567 rte_spinlock_unlock(&sa
->mgmt_evq_lock
);
572 sfc_ev_qprime(struct sfc_evq
*evq
)
574 SFC_ASSERT(evq
->init_state
== SFC_EVQ_STARTED
);
575 return efx_ev_qprime(evq
->common
, evq
->read_ptr
);
578 /* Event queue HW index allocation scheme is described in sfc_ev.h. */
580 sfc_ev_qstart(struct sfc_evq
*evq
, unsigned int hw_index
)
582 struct sfc_adapter
*sa
= evq
->sa
;
584 uint32_t evq_flags
= sa
->evq_flags
;
585 unsigned int total_delay_us
;
586 unsigned int delay_us
;
589 sfc_log_init(sa
, "hw_index=%u", hw_index
);
593 evq
->evq_index
= hw_index
;
595 /* Clear all events */
596 (void)memset((void *)esmp
->esm_base
, 0xff, EFX_EVQ_SIZE(evq
->entries
));
598 if (sa
->intr
.lsc_intr
&& hw_index
== sa
->mgmt_evq_index
)
599 evq_flags
|= EFX_EVQ_FLAGS_NOTIFY_INTERRUPT
;
601 evq_flags
|= EFX_EVQ_FLAGS_NOTIFY_DISABLED
;
603 /* Create the common code event queue */
604 rc
= efx_ev_qcreate(sa
->nic
, hw_index
, esmp
, evq
->entries
,
605 0 /* unused on EF10 */, 0, evq_flags
,
608 goto fail_ev_qcreate
;
610 SFC_ASSERT(evq
->dp_rxq
== NULL
|| evq
->dp_txq
== NULL
);
611 if (evq
->dp_rxq
!= 0) {
612 if (strcmp(sa
->dp_rx
->dp
.name
, SFC_KVARG_DATAPATH_EFX
) == 0)
613 evq
->callbacks
= &sfc_ev_callbacks_efx_rx
;
615 evq
->callbacks
= &sfc_ev_callbacks_dp_rx
;
616 } else if (evq
->dp_txq
!= 0) {
617 if (strcmp(sa
->dp_tx
->dp
.name
, SFC_KVARG_DATAPATH_EFX
) == 0)
618 evq
->callbacks
= &sfc_ev_callbacks_efx_tx
;
620 evq
->callbacks
= &sfc_ev_callbacks_dp_tx
;
622 evq
->callbacks
= &sfc_ev_callbacks
;
625 evq
->init_state
= SFC_EVQ_STARTING
;
627 /* Wait for the initialization event */
629 delay_us
= SFC_EVQ_INIT_BACKOFF_START_US
;
631 (void)sfc_ev_qpoll(evq
);
633 /* Check to see if the initialization complete indication
634 * posted by the hardware.
636 if (evq
->init_state
== SFC_EVQ_STARTED
)
639 /* Give event queue some time to init */
640 rte_delay_us(delay_us
);
642 total_delay_us
+= delay_us
;
644 /* Exponential backoff */
646 if (delay_us
> SFC_EVQ_INIT_BACKOFF_MAX_US
)
647 delay_us
= SFC_EVQ_INIT_BACKOFF_MAX_US
;
649 } while (total_delay_us
< SFC_EVQ_INIT_TIMEOUT_US
);
658 evq
->init_state
= SFC_EVQ_INITIALIZED
;
659 efx_ev_qdestroy(evq
->common
);
662 sfc_log_init(sa
, "failed %d", rc
);
667 sfc_ev_qstop(struct sfc_evq
*evq
)
672 sfc_log_init(evq
->sa
, "hw_index=%u", evq
->evq_index
);
674 if (evq
->init_state
!= SFC_EVQ_STARTED
)
677 evq
->init_state
= SFC_EVQ_INITIALIZED
;
678 evq
->callbacks
= NULL
;
680 evq
->exception
= B_FALSE
;
682 efx_ev_qdestroy(evq
->common
);
688 sfc_ev_mgmt_periodic_qpoll(void *arg
)
690 struct sfc_adapter
*sa
= arg
;
693 sfc_ev_mgmt_qpoll(sa
);
695 rc
= rte_eal_alarm_set(SFC_MGMT_EV_QPOLL_PERIOD_US
,
696 sfc_ev_mgmt_periodic_qpoll
, sa
);
697 if (rc
== -ENOTSUP
) {
698 sfc_warn(sa
, "alarms are not supported");
699 sfc_warn(sa
, "management EVQ must be polled indirectly using no-wait link status update");
700 } else if (rc
!= 0) {
702 "cannot rearm management EVQ polling alarm (rc=%d)",
/* Kick off periodic management EvQ polling (first poll arms the alarm). */
static void
sfc_ev_mgmt_periodic_qpoll_start(struct sfc_adapter *sa)
{
	sfc_ev_mgmt_periodic_qpoll(sa);
}
714 sfc_ev_mgmt_periodic_qpoll_stop(struct sfc_adapter
*sa
)
716 rte_eal_alarm_cancel(sfc_ev_mgmt_periodic_qpoll
, sa
);
720 sfc_ev_start(struct sfc_adapter
*sa
)
724 sfc_log_init(sa
, "entry");
726 rc
= efx_ev_init(sa
->nic
);
730 /* Start management EVQ used for global events */
733 * Management event queue start polls the queue, but it cannot
734 * interfere with other polling contexts since mgmt_evq_running
737 rc
= sfc_ev_qstart(sa
->mgmt_evq
, sa
->mgmt_evq_index
);
739 goto fail_mgmt_evq_start
;
741 rte_spinlock_lock(&sa
->mgmt_evq_lock
);
742 sa
->mgmt_evq_running
= true;
743 rte_spinlock_unlock(&sa
->mgmt_evq_lock
);
745 if (sa
->intr
.lsc_intr
) {
746 rc
= sfc_ev_qprime(sa
->mgmt_evq
);
748 goto fail_mgmt_evq_prime
;
752 * Start management EVQ polling. If interrupts are disabled
753 * (not used), it is required to process link status change
754 * and other device level events to avoid unrecoverable
755 * error because the event queue overflow.
757 sfc_ev_mgmt_periodic_qpoll_start(sa
);
760 * Rx/Tx event queues are started/stopped when corresponding
761 * Rx/Tx queue is started/stopped.
767 sfc_ev_qstop(sa
->mgmt_evq
);
770 efx_ev_fini(sa
->nic
);
773 sfc_log_init(sa
, "failed %d", rc
);
778 sfc_ev_stop(struct sfc_adapter
*sa
)
780 sfc_log_init(sa
, "entry");
782 sfc_ev_mgmt_periodic_qpoll_stop(sa
);
784 rte_spinlock_lock(&sa
->mgmt_evq_lock
);
785 sa
->mgmt_evq_running
= false;
786 rte_spinlock_unlock(&sa
->mgmt_evq_lock
);
788 sfc_ev_qstop(sa
->mgmt_evq
);
790 efx_ev_fini(sa
->nic
);
794 sfc_ev_qinit(struct sfc_adapter
*sa
,
795 enum sfc_evq_type type
, unsigned int type_index
,
796 unsigned int entries
, int socket_id
, struct sfc_evq
**evqp
)
801 sfc_log_init(sa
, "type=%s type_index=%u",
802 sfc_evq_type2str(type
), type_index
);
804 SFC_ASSERT(rte_is_power_of_2(entries
));
807 evq
= rte_zmalloc_socket("sfc-evq", sizeof(*evq
), RTE_CACHE_LINE_SIZE
,
814 evq
->entries
= entries
;
816 /* Allocate DMA space */
817 rc
= sfc_dma_alloc(sa
, sfc_evq_type2str(type
), type_index
,
818 EFX_EVQ_SIZE(evq
->entries
), socket_id
, &evq
->mem
);
822 evq
->init_state
= SFC_EVQ_INITIALIZED
;
835 sfc_log_init(sa
, "failed %d", rc
);
840 sfc_ev_qfini(struct sfc_evq
*evq
)
842 struct sfc_adapter
*sa
= evq
->sa
;
844 SFC_ASSERT(evq
->init_state
== SFC_EVQ_INITIALIZED
);
846 sfc_dma_free(sa
, &evq
->mem
);
850 SFC_ASSERT(sa
->evq_count
> 0);
855 sfc_kvarg_perf_profile_handler(__rte_unused
const char *key
,
856 const char *value_str
, void *opaque
)
858 uint32_t *value
= opaque
;
860 if (strcasecmp(value_str
, SFC_KVARG_PERF_PROFILE_THROUGHPUT
) == 0)
861 *value
= EFX_EVQ_FLAGS_TYPE_THROUGHPUT
;
862 else if (strcasecmp(value_str
, SFC_KVARG_PERF_PROFILE_LOW_LATENCY
) == 0)
863 *value
= EFX_EVQ_FLAGS_TYPE_LOW_LATENCY
;
864 else if (strcasecmp(value_str
, SFC_KVARG_PERF_PROFILE_AUTO
) == 0)
865 *value
= EFX_EVQ_FLAGS_TYPE_AUTO
;
873 sfc_ev_attach(struct sfc_adapter
*sa
)
877 sfc_log_init(sa
, "entry");
879 sa
->evq_flags
= EFX_EVQ_FLAGS_TYPE_THROUGHPUT
;
880 rc
= sfc_kvargs_process(sa
, SFC_KVARG_PERF_PROFILE
,
881 sfc_kvarg_perf_profile_handler
,
884 sfc_err(sa
, "invalid %s parameter value",
885 SFC_KVARG_PERF_PROFILE
);
886 goto fail_kvarg_perf_profile
;
889 sa
->mgmt_evq_index
= 0;
890 rte_spinlock_init(&sa
->mgmt_evq_lock
);
892 rc
= sfc_ev_qinit(sa
, SFC_EVQ_TYPE_MGMT
, 0, SFC_MGMT_EVQ_ENTRIES
,
893 sa
->socket_id
, &sa
->mgmt_evq
);
895 goto fail_mgmt_evq_init
;
898 * Rx/Tx event queues are created/destroyed when corresponding
899 * Rx/Tx queue is created/destroyed.
906 fail_kvarg_perf_profile
:
907 sfc_log_init(sa
, "failed %d", rc
);
912 sfc_ev_detach(struct sfc_adapter
*sa
)
914 sfc_log_init(sa
, "entry");
916 sfc_ev_qfini(sa
->mgmt_evq
);
918 if (sa
->evq_count
!= 0)
919 sfc_err(sa
, "%u EvQs are not destroyed before detach",