4 * Copyright (c) 2016-2017 Solarflare Communications Inc.
7 * This software was jointly developed between OKTET Labs (under contract
8 * for Solarflare) and Solarflare Communications, Inc.
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions are met:
13 * 1. Redistributions of source code must retain the above copyright notice,
14 * this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright notice,
16 * this list of conditions and the following disclaimer in the documentation
17 * and/or other materials provided with the distribution.
19 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
20 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
21 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
23 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
24 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
25 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
26 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
27 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
28 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
29 * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32 #include <rte_debug.h>
33 #include <rte_cycles.h>
34 #include <rte_alarm.h>
35 #include <rte_branch_prediction.h>
40 #include "sfc_debug.h"
45 #include "sfc_kvargs.h"
48 /* Initial delay when waiting for event queue init complete event */
49 #define SFC_EVQ_INIT_BACKOFF_START_US (1)
50 /* Maximum delay between event queue polling attempts */
51 #define SFC_EVQ_INIT_BACKOFF_MAX_US (10 * 1000)
52 /* Event queue init approx timeout */
53 #define SFC_EVQ_INIT_TIMEOUT_US (2 * US_PER_S)
55 /* Management event queue polling period in microseconds */
56 #define SFC_MGMT_EV_QPOLL_PERIOD_US (US_PER_S)
/*
 * Map an event queue type to a human-readable name string
 * (used for logging and DMA region naming in sfc_ev_qinit()).
 * NOTE(review): only the SFC_EVQ_TYPE_MGMT case label is visible in this
 * fragment; the remaining switch cases lie outside the extracted text.
 */
59 sfc_evq_type2str(enum sfc_evq_type type
)
62 case SFC_EVQ_TYPE_MGMT
:
75 sfc_ev_initialized(void *arg
)
77 struct sfc_evq
*evq
= arg
;
79 /* Init done events may be duplicated on SFN7xxx (SFC bug 31631) */
80 SFC_ASSERT(evq
->init_state
== SFC_EVQ_STARTING
||
81 evq
->init_state
== SFC_EVQ_STARTED
);
83 evq
->init_state
= SFC_EVQ_STARTED
;
89 sfc_ev_nop_rx(void *arg
, uint32_t label
, uint32_t id
,
90 uint32_t size
, uint16_t flags
)
92 struct sfc_evq
*evq
= arg
;
95 "EVQ %u unexpected Rx event label=%u id=%#x size=%u flags=%#x",
96 evq
->evq_index
, label
, id
, size
, flags
);
101 sfc_ev_efx_rx(void *arg
, __rte_unused
uint32_t label
, uint32_t id
,
102 uint32_t size
, uint16_t flags
)
104 struct sfc_evq
*evq
= arg
;
105 struct sfc_efx_rxq
*rxq
;
107 unsigned int pending_id
;
110 struct sfc_efx_rx_sw_desc
*rxd
;
112 if (unlikely(evq
->exception
))
115 rxq
= sfc_efx_rxq_by_dp_rxq(evq
->dp_rxq
);
117 SFC_ASSERT(rxq
!= NULL
);
118 SFC_ASSERT(rxq
->evq
== evq
);
119 SFC_ASSERT(rxq
->flags
& SFC_EFX_RXQ_FLAG_STARTED
);
121 stop
= (id
+ 1) & rxq
->ptr_mask
;
122 pending_id
= rxq
->pending
& rxq
->ptr_mask
;
123 delta
= (stop
>= pending_id
) ? (stop
- pending_id
) :
124 (rxq
->ptr_mask
+ 1 - pending_id
+ stop
);
128 * Rx event with no new descriptors done and zero length
129 * is used to abort scattered packet when there is no room
132 if (unlikely(size
!= 0)) {
133 evq
->exception
= B_TRUE
;
135 "EVQ %u RxQ %u invalid RX abort "
136 "(id=%#x size=%u flags=%#x); needs restart",
137 evq
->evq_index
, rxq
->dp
.dpq
.queue_id
,
142 /* Add discard flag to the first fragment */
143 rxq
->sw_desc
[pending_id
].flags
|= EFX_DISCARD
;
144 /* Remove continue flag from the last fragment */
145 rxq
->sw_desc
[id
].flags
&= ~EFX_PKT_CONT
;
146 } else if (unlikely(delta
> rxq
->batch_max
)) {
147 evq
->exception
= B_TRUE
;
150 "EVQ %u RxQ %u completion out of order "
151 "(id=%#x delta=%u flags=%#x); needs restart",
152 evq
->evq_index
, rxq
->dp
.dpq
.queue_id
,
158 for (i
= pending_id
; i
!= stop
; i
= (i
+ 1) & rxq
->ptr_mask
) {
159 rxd
= &rxq
->sw_desc
[i
];
163 SFC_ASSERT(size
< (1 << 16));
164 rxd
->size
= (uint16_t)size
;
167 rxq
->pending
+= delta
;
174 sfc_ev_dp_rx(void *arg
, __rte_unused
uint32_t label
, uint32_t id
,
175 __rte_unused
uint32_t size
, __rte_unused
uint16_t flags
)
177 struct sfc_evq
*evq
= arg
;
178 struct sfc_dp_rxq
*dp_rxq
;
180 dp_rxq
= evq
->dp_rxq
;
181 SFC_ASSERT(dp_rxq
!= NULL
);
183 SFC_ASSERT(evq
->sa
->dp_rx
->qrx_ev
!= NULL
);
184 return evq
->sa
->dp_rx
->qrx_ev(dp_rxq
, id
);
188 sfc_ev_nop_tx(void *arg
, uint32_t label
, uint32_t id
)
190 struct sfc_evq
*evq
= arg
;
192 sfc_err(evq
->sa
, "EVQ %u unexpected Tx event label=%u id=%#x",
193 evq
->evq_index
, label
, id
);
198 sfc_ev_tx(void *arg
, __rte_unused
uint32_t label
, uint32_t id
)
200 struct sfc_evq
*evq
= arg
;
201 struct sfc_dp_txq
*dp_txq
;
202 struct sfc_efx_txq
*txq
;
206 dp_txq
= evq
->dp_txq
;
207 SFC_ASSERT(dp_txq
!= NULL
);
209 txq
= sfc_efx_txq_by_dp_txq(dp_txq
);
210 SFC_ASSERT(txq
->evq
== evq
);
212 if (unlikely((txq
->flags
& SFC_EFX_TXQ_FLAG_STARTED
) == 0))
215 stop
= (id
+ 1) & txq
->ptr_mask
;
216 id
= txq
->pending
& txq
->ptr_mask
;
218 delta
= (stop
>= id
) ? (stop
- id
) : (txq
->ptr_mask
+ 1 - id
+ stop
);
220 txq
->pending
+= delta
;
227 sfc_ev_dp_tx(void *arg
, __rte_unused
uint32_t label
, uint32_t id
)
229 struct sfc_evq
*evq
= arg
;
230 struct sfc_dp_txq
*dp_txq
;
232 dp_txq
= evq
->dp_txq
;
233 SFC_ASSERT(dp_txq
!= NULL
);
235 SFC_ASSERT(evq
->sa
->dp_tx
->qtx_ev
!= NULL
);
236 return evq
->sa
->dp_tx
->qtx_ev(dp_txq
, id
);
240 sfc_ev_exception(void *arg
, __rte_unused
uint32_t code
,
241 __rte_unused
uint32_t data
)
243 struct sfc_evq
*evq
= arg
;
245 if (code
== EFX_EXCEPTION_UNKNOWN_SENSOREVT
)
248 evq
->exception
= B_TRUE
;
250 "hardware exception %s (code=%u, data=%#x) on EVQ %u;"
252 (code
== EFX_EXCEPTION_RX_RECOVERY
) ? "RX_RECOVERY" :
253 (code
== EFX_EXCEPTION_RX_DSC_ERROR
) ? "RX_DSC_ERROR" :
254 (code
== EFX_EXCEPTION_TX_DSC_ERROR
) ? "TX_DSC_ERROR" :
255 (code
== EFX_EXCEPTION_FWALERT_SRAM
) ? "FWALERT_SRAM" :
256 (code
== EFX_EXCEPTION_UNKNOWN_FWALERT
) ? "UNKNOWN_FWALERT" :
257 (code
== EFX_EXCEPTION_RX_ERROR
) ? "RX_ERROR" :
258 (code
== EFX_EXCEPTION_TX_ERROR
) ? "TX_ERROR" :
259 (code
== EFX_EXCEPTION_EV_ERROR
) ? "EV_ERROR" :
261 code
, data
, evq
->evq_index
);
267 sfc_ev_nop_rxq_flush_done(void *arg
, uint32_t rxq_hw_index
)
269 struct sfc_evq
*evq
= arg
;
271 sfc_err(evq
->sa
, "EVQ %u unexpected RxQ %u flush done",
272 evq
->evq_index
, rxq_hw_index
);
277 sfc_ev_rxq_flush_done(void *arg
, __rte_unused
uint32_t rxq_hw_index
)
279 struct sfc_evq
*evq
= arg
;
280 struct sfc_dp_rxq
*dp_rxq
;
283 dp_rxq
= evq
->dp_rxq
;
284 SFC_ASSERT(dp_rxq
!= NULL
);
286 rxq
= sfc_rxq_by_dp_rxq(dp_rxq
);
287 SFC_ASSERT(rxq
!= NULL
);
288 SFC_ASSERT(rxq
->hw_index
== rxq_hw_index
);
289 SFC_ASSERT(rxq
->evq
== evq
);
290 sfc_rx_qflush_done(rxq
);
296 sfc_ev_nop_rxq_flush_failed(void *arg
, uint32_t rxq_hw_index
)
298 struct sfc_evq
*evq
= arg
;
300 sfc_err(evq
->sa
, "EVQ %u unexpected RxQ %u flush failed",
301 evq
->evq_index
, rxq_hw_index
);
306 sfc_ev_rxq_flush_failed(void *arg
, __rte_unused
uint32_t rxq_hw_index
)
308 struct sfc_evq
*evq
= arg
;
309 struct sfc_dp_rxq
*dp_rxq
;
312 dp_rxq
= evq
->dp_rxq
;
313 SFC_ASSERT(dp_rxq
!= NULL
);
315 rxq
= sfc_rxq_by_dp_rxq(dp_rxq
);
316 SFC_ASSERT(rxq
!= NULL
);
317 SFC_ASSERT(rxq
->hw_index
== rxq_hw_index
);
318 SFC_ASSERT(rxq
->evq
== evq
);
319 sfc_rx_qflush_failed(rxq
);
325 sfc_ev_nop_txq_flush_done(void *arg
, uint32_t txq_hw_index
)
327 struct sfc_evq
*evq
= arg
;
329 sfc_err(evq
->sa
, "EVQ %u unexpected TxQ %u flush done",
330 evq
->evq_index
, txq_hw_index
);
335 sfc_ev_txq_flush_done(void *arg
, __rte_unused
uint32_t txq_hw_index
)
337 struct sfc_evq
*evq
= arg
;
338 struct sfc_dp_txq
*dp_txq
;
341 dp_txq
= evq
->dp_txq
;
342 SFC_ASSERT(dp_txq
!= NULL
);
344 txq
= sfc_txq_by_dp_txq(dp_txq
);
345 SFC_ASSERT(txq
!= NULL
);
346 SFC_ASSERT(txq
->hw_index
== txq_hw_index
);
347 SFC_ASSERT(txq
->evq
== evq
);
348 sfc_tx_qflush_done(txq
);
354 sfc_ev_software(void *arg
, uint16_t magic
)
356 struct sfc_evq
*evq
= arg
;
358 sfc_err(evq
->sa
, "EVQ %u unexpected software event magic=%#.4x",
359 evq
->evq_index
, magic
);
364 sfc_ev_sram(void *arg
, uint32_t code
)
366 struct sfc_evq
*evq
= arg
;
368 sfc_err(evq
->sa
, "EVQ %u unexpected SRAM event code=%u",
369 evq
->evq_index
, code
);
374 sfc_ev_wake_up(void *arg
, uint32_t index
)
376 struct sfc_evq
*evq
= arg
;
378 sfc_err(evq
->sa
, "EVQ %u unexpected wake up event index=%u",
379 evq
->evq_index
, index
);
384 sfc_ev_timer(void *arg
, uint32_t index
)
386 struct sfc_evq
*evq
= arg
;
388 sfc_err(evq
->sa
, "EVQ %u unexpected timer event index=%u",
389 evq
->evq_index
, index
);
394 sfc_ev_nop_link_change(void *arg
, __rte_unused efx_link_mode_t link_mode
)
396 struct sfc_evq
*evq
= arg
;
398 sfc_err(evq
->sa
, "EVQ %u unexpected link change event",
404 sfc_ev_link_change(void *arg
, efx_link_mode_t link_mode
)
406 struct sfc_evq
*evq
= arg
;
407 struct sfc_adapter
*sa
= evq
->sa
;
408 struct rte_eth_link
*dev_link
= &sa
->eth_dev
->data
->dev_link
;
409 struct rte_eth_link new_link
;
410 uint64_t new_link_u64
;
411 uint64_t old_link_u64
;
413 EFX_STATIC_ASSERT(sizeof(*dev_link
) == sizeof(rte_atomic64_t
));
415 sfc_port_link_mode_to_info(link_mode
, &new_link
);
417 new_link_u64
= *(uint64_t *)&new_link
;
419 old_link_u64
= rte_atomic64_read((rte_atomic64_t
*)dev_link
);
420 if (old_link_u64
== new_link_u64
)
423 if (rte_atomic64_cmpset((volatile uint64_t *)dev_link
,
424 old_link_u64
, new_link_u64
)) {
425 evq
->sa
->port
.lsc_seq
++;
433 static const efx_ev_callbacks_t sfc_ev_callbacks
= {
434 .eec_initialized
= sfc_ev_initialized
,
435 .eec_rx
= sfc_ev_nop_rx
,
436 .eec_tx
= sfc_ev_nop_tx
,
437 .eec_exception
= sfc_ev_exception
,
438 .eec_rxq_flush_done
= sfc_ev_nop_rxq_flush_done
,
439 .eec_rxq_flush_failed
= sfc_ev_nop_rxq_flush_failed
,
440 .eec_txq_flush_done
= sfc_ev_nop_txq_flush_done
,
441 .eec_software
= sfc_ev_software
,
442 .eec_sram
= sfc_ev_sram
,
443 .eec_wake_up
= sfc_ev_wake_up
,
444 .eec_timer
= sfc_ev_timer
,
445 .eec_link_change
= sfc_ev_link_change
,
448 static const efx_ev_callbacks_t sfc_ev_callbacks_efx_rx
= {
449 .eec_initialized
= sfc_ev_initialized
,
450 .eec_rx
= sfc_ev_efx_rx
,
451 .eec_tx
= sfc_ev_nop_tx
,
452 .eec_exception
= sfc_ev_exception
,
453 .eec_rxq_flush_done
= sfc_ev_rxq_flush_done
,
454 .eec_rxq_flush_failed
= sfc_ev_rxq_flush_failed
,
455 .eec_txq_flush_done
= sfc_ev_nop_txq_flush_done
,
456 .eec_software
= sfc_ev_software
,
457 .eec_sram
= sfc_ev_sram
,
458 .eec_wake_up
= sfc_ev_wake_up
,
459 .eec_timer
= sfc_ev_timer
,
460 .eec_link_change
= sfc_ev_nop_link_change
,
463 static const efx_ev_callbacks_t sfc_ev_callbacks_dp_rx
= {
464 .eec_initialized
= sfc_ev_initialized
,
465 .eec_rx
= sfc_ev_dp_rx
,
466 .eec_tx
= sfc_ev_nop_tx
,
467 .eec_exception
= sfc_ev_exception
,
468 .eec_rxq_flush_done
= sfc_ev_rxq_flush_done
,
469 .eec_rxq_flush_failed
= sfc_ev_rxq_flush_failed
,
470 .eec_txq_flush_done
= sfc_ev_nop_txq_flush_done
,
471 .eec_software
= sfc_ev_software
,
472 .eec_sram
= sfc_ev_sram
,
473 .eec_wake_up
= sfc_ev_wake_up
,
474 .eec_timer
= sfc_ev_timer
,
475 .eec_link_change
= sfc_ev_nop_link_change
,
478 static const efx_ev_callbacks_t sfc_ev_callbacks_efx_tx
= {
479 .eec_initialized
= sfc_ev_initialized
,
480 .eec_rx
= sfc_ev_nop_rx
,
482 .eec_exception
= sfc_ev_exception
,
483 .eec_rxq_flush_done
= sfc_ev_nop_rxq_flush_done
,
484 .eec_rxq_flush_failed
= sfc_ev_nop_rxq_flush_failed
,
485 .eec_txq_flush_done
= sfc_ev_txq_flush_done
,
486 .eec_software
= sfc_ev_software
,
487 .eec_sram
= sfc_ev_sram
,
488 .eec_wake_up
= sfc_ev_wake_up
,
489 .eec_timer
= sfc_ev_timer
,
490 .eec_link_change
= sfc_ev_nop_link_change
,
493 static const efx_ev_callbacks_t sfc_ev_callbacks_dp_tx
= {
494 .eec_initialized
= sfc_ev_initialized
,
495 .eec_rx
= sfc_ev_nop_rx
,
496 .eec_tx
= sfc_ev_dp_tx
,
497 .eec_exception
= sfc_ev_exception
,
498 .eec_rxq_flush_done
= sfc_ev_nop_rxq_flush_done
,
499 .eec_rxq_flush_failed
= sfc_ev_nop_rxq_flush_failed
,
500 .eec_txq_flush_done
= sfc_ev_txq_flush_done
,
501 .eec_software
= sfc_ev_software
,
502 .eec_sram
= sfc_ev_sram
,
503 .eec_wake_up
= sfc_ev_wake_up
,
504 .eec_timer
= sfc_ev_timer
,
505 .eec_link_change
= sfc_ev_nop_link_change
,
510 sfc_ev_qpoll(struct sfc_evq
*evq
)
512 SFC_ASSERT(evq
->init_state
== SFC_EVQ_STARTED
||
513 evq
->init_state
== SFC_EVQ_STARTING
);
515 /* Synchronize the DMA memory for reading not required */
517 efx_ev_qpoll(evq
->common
, &evq
->read_ptr
, evq
->callbacks
, evq
);
519 if (unlikely(evq
->exception
) && sfc_adapter_trylock(evq
->sa
)) {
520 struct sfc_adapter
*sa
= evq
->sa
;
523 if (evq
->dp_rxq
!= NULL
) {
524 unsigned int rxq_sw_index
;
526 rxq_sw_index
= evq
->dp_rxq
->dpq
.queue_id
;
529 "restart RxQ %u because of exception on its EvQ %u",
530 rxq_sw_index
, evq
->evq_index
);
532 sfc_rx_qstop(sa
, rxq_sw_index
);
533 rc
= sfc_rx_qstart(sa
, rxq_sw_index
);
535 sfc_err(sa
, "cannot restart RxQ %u",
539 if (evq
->dp_txq
!= NULL
) {
540 unsigned int txq_sw_index
;
542 txq_sw_index
= evq
->dp_txq
->dpq
.queue_id
;
545 "restart TxQ %u because of exception on its EvQ %u",
546 txq_sw_index
, evq
->evq_index
);
548 sfc_tx_qstop(sa
, txq_sw_index
);
549 rc
= sfc_tx_qstart(sa
, txq_sw_index
);
551 sfc_err(sa
, "cannot restart TxQ %u",
556 sfc_panic(sa
, "unrecoverable exception on EvQ %u",
559 sfc_adapter_unlock(sa
);
562 /* Poll-mode driver does not re-prime the event queue for interrupts */
566 sfc_ev_mgmt_qpoll(struct sfc_adapter
*sa
)
568 if (rte_spinlock_trylock(&sa
->mgmt_evq_lock
)) {
569 struct sfc_evq
*mgmt_evq
= sa
->mgmt_evq
;
571 if (mgmt_evq
->init_state
== SFC_EVQ_STARTED
)
572 sfc_ev_qpoll(mgmt_evq
);
574 rte_spinlock_unlock(&sa
->mgmt_evq_lock
);
579 sfc_ev_qprime(struct sfc_evq
*evq
)
581 SFC_ASSERT(evq
->init_state
== SFC_EVQ_STARTED
);
582 return efx_ev_qprime(evq
->common
, evq
->read_ptr
);
585 /* Event queue HW index allocation scheme is described in sfc_ev.h. */
587 sfc_ev_qstart(struct sfc_evq
*evq
, unsigned int hw_index
)
589 struct sfc_adapter
*sa
= evq
->sa
;
591 uint32_t evq_flags
= sa
->evq_flags
;
592 unsigned int total_delay_us
;
593 unsigned int delay_us
;
596 sfc_log_init(sa
, "hw_index=%u", hw_index
);
600 evq
->evq_index
= hw_index
;
602 /* Clear all events */
603 (void)memset((void *)esmp
->esm_base
, 0xff, EFX_EVQ_SIZE(evq
->entries
));
605 if (sa
->intr
.lsc_intr
&& hw_index
== sa
->mgmt_evq_index
)
606 evq_flags
|= EFX_EVQ_FLAGS_NOTIFY_INTERRUPT
;
608 evq_flags
|= EFX_EVQ_FLAGS_NOTIFY_DISABLED
;
610 /* Create the common code event queue */
611 rc
= efx_ev_qcreate(sa
->nic
, hw_index
, esmp
, evq
->entries
,
612 0 /* unused on EF10 */, 0, evq_flags
,
615 goto fail_ev_qcreate
;
617 SFC_ASSERT(evq
->dp_rxq
== NULL
|| evq
->dp_txq
== NULL
);
618 if (evq
->dp_rxq
!= 0) {
619 if (strcmp(sa
->dp_rx
->dp
.name
, SFC_KVARG_DATAPATH_EFX
) == 0)
620 evq
->callbacks
= &sfc_ev_callbacks_efx_rx
;
622 evq
->callbacks
= &sfc_ev_callbacks_dp_rx
;
623 } else if (evq
->dp_txq
!= 0) {
624 if (strcmp(sa
->dp_tx
->dp
.name
, SFC_KVARG_DATAPATH_EFX
) == 0)
625 evq
->callbacks
= &sfc_ev_callbacks_efx_tx
;
627 evq
->callbacks
= &sfc_ev_callbacks_dp_tx
;
629 evq
->callbacks
= &sfc_ev_callbacks
;
632 evq
->init_state
= SFC_EVQ_STARTING
;
634 /* Wait for the initialization event */
636 delay_us
= SFC_EVQ_INIT_BACKOFF_START_US
;
638 (void)sfc_ev_qpoll(evq
);
640 /* Check to see if the initialization complete indication
641 * posted by the hardware.
643 if (evq
->init_state
== SFC_EVQ_STARTED
)
646 /* Give event queue some time to init */
647 rte_delay_us(delay_us
);
649 total_delay_us
+= delay_us
;
651 /* Exponential backoff */
653 if (delay_us
> SFC_EVQ_INIT_BACKOFF_MAX_US
)
654 delay_us
= SFC_EVQ_INIT_BACKOFF_MAX_US
;
656 } while (total_delay_us
< SFC_EVQ_INIT_TIMEOUT_US
);
665 evq
->init_state
= SFC_EVQ_INITIALIZED
;
666 efx_ev_qdestroy(evq
->common
);
669 sfc_log_init(sa
, "failed %d", rc
);
674 sfc_ev_qstop(struct sfc_evq
*evq
)
679 sfc_log_init(evq
->sa
, "hw_index=%u", evq
->evq_index
);
681 if (evq
->init_state
!= SFC_EVQ_STARTED
)
684 evq
->init_state
= SFC_EVQ_INITIALIZED
;
685 evq
->callbacks
= NULL
;
687 evq
->exception
= B_FALSE
;
689 efx_ev_qdestroy(evq
->common
);
695 sfc_ev_mgmt_periodic_qpoll(void *arg
)
697 struct sfc_adapter
*sa
= arg
;
700 sfc_ev_mgmt_qpoll(sa
);
702 rc
= rte_eal_alarm_set(SFC_MGMT_EV_QPOLL_PERIOD_US
,
703 sfc_ev_mgmt_periodic_qpoll
, sa
);
704 if (rc
== -ENOTSUP
) {
705 sfc_warn(sa
, "alarms are not supported");
706 sfc_warn(sa
, "management EVQ must be polled indirectly using no-wait link status update");
707 } else if (rc
!= 0) {
709 "cannot rearm management EVQ polling alarm (rc=%d)",
/* Kick off periodic management EVQ polling (self-rearming alarm). */
static void
sfc_ev_mgmt_periodic_qpoll_start(struct sfc_adapter *sa)
{
	sfc_ev_mgmt_periodic_qpoll(sa);
}
721 sfc_ev_mgmt_periodic_qpoll_stop(struct sfc_adapter
*sa
)
723 rte_eal_alarm_cancel(sfc_ev_mgmt_periodic_qpoll
, sa
);
727 sfc_ev_start(struct sfc_adapter
*sa
)
731 sfc_log_init(sa
, "entry");
733 rc
= efx_ev_init(sa
->nic
);
737 /* Start management EVQ used for global events */
738 rte_spinlock_lock(&sa
->mgmt_evq_lock
);
740 rc
= sfc_ev_qstart(sa
->mgmt_evq
, sa
->mgmt_evq_index
);
742 goto fail_mgmt_evq_start
;
744 if (sa
->intr
.lsc_intr
) {
745 rc
= sfc_ev_qprime(sa
->mgmt_evq
);
747 goto fail_evq0_prime
;
750 rte_spinlock_unlock(&sa
->mgmt_evq_lock
);
753 * Start management EVQ polling. If interrupts are disabled
754 * (not used), it is required to process link status change
755 * and other device level events to avoid unrecoverable
756 * error because the event queue overflow.
758 sfc_ev_mgmt_periodic_qpoll_start(sa
);
761 * Rx/Tx event queues are started/stopped when corresponding
762 * Rx/Tx queue is started/stopped.
768 sfc_ev_qstop(sa
->mgmt_evq
);
771 rte_spinlock_unlock(&sa
->mgmt_evq_lock
);
772 efx_ev_fini(sa
->nic
);
775 sfc_log_init(sa
, "failed %d", rc
);
780 sfc_ev_stop(struct sfc_adapter
*sa
)
782 sfc_log_init(sa
, "entry");
784 sfc_ev_mgmt_periodic_qpoll_stop(sa
);
786 rte_spinlock_lock(&sa
->mgmt_evq_lock
);
787 sfc_ev_qstop(sa
->mgmt_evq
);
788 rte_spinlock_unlock(&sa
->mgmt_evq_lock
);
790 efx_ev_fini(sa
->nic
);
794 sfc_ev_qinit(struct sfc_adapter
*sa
,
795 enum sfc_evq_type type
, unsigned int type_index
,
796 unsigned int entries
, int socket_id
, struct sfc_evq
**evqp
)
801 sfc_log_init(sa
, "type=%s type_index=%u",
802 sfc_evq_type2str(type
), type_index
);
804 SFC_ASSERT(rte_is_power_of_2(entries
));
807 evq
= rte_zmalloc_socket("sfc-evq", sizeof(*evq
), RTE_CACHE_LINE_SIZE
,
814 evq
->entries
= entries
;
816 /* Allocate DMA space */
817 rc
= sfc_dma_alloc(sa
, sfc_evq_type2str(type
), type_index
,
818 EFX_EVQ_SIZE(evq
->entries
), socket_id
, &evq
->mem
);
822 evq
->init_state
= SFC_EVQ_INITIALIZED
;
835 sfc_log_init(sa
, "failed %d", rc
);
840 sfc_ev_qfini(struct sfc_evq
*evq
)
842 struct sfc_adapter
*sa
= evq
->sa
;
844 SFC_ASSERT(evq
->init_state
== SFC_EVQ_INITIALIZED
);
846 sfc_dma_free(sa
, &evq
->mem
);
850 SFC_ASSERT(sa
->evq_count
> 0);
855 sfc_kvarg_perf_profile_handler(__rte_unused
const char *key
,
856 const char *value_str
, void *opaque
)
858 uint64_t *value
= opaque
;
860 if (strcasecmp(value_str
, SFC_KVARG_PERF_PROFILE_THROUGHPUT
) == 0)
861 *value
= EFX_EVQ_FLAGS_TYPE_THROUGHPUT
;
862 else if (strcasecmp(value_str
, SFC_KVARG_PERF_PROFILE_LOW_LATENCY
) == 0)
863 *value
= EFX_EVQ_FLAGS_TYPE_LOW_LATENCY
;
864 else if (strcasecmp(value_str
, SFC_KVARG_PERF_PROFILE_AUTO
) == 0)
865 *value
= EFX_EVQ_FLAGS_TYPE_AUTO
;
873 sfc_ev_attach(struct sfc_adapter
*sa
)
877 sfc_log_init(sa
, "entry");
879 sa
->evq_flags
= EFX_EVQ_FLAGS_TYPE_THROUGHPUT
;
880 rc
= sfc_kvargs_process(sa
, SFC_KVARG_PERF_PROFILE
,
881 sfc_kvarg_perf_profile_handler
,
884 sfc_err(sa
, "invalid %s parameter value",
885 SFC_KVARG_PERF_PROFILE
);
886 goto fail_kvarg_perf_profile
;
889 sa
->mgmt_evq_index
= 0;
890 rte_spinlock_init(&sa
->mgmt_evq_lock
);
892 rc
= sfc_ev_qinit(sa
, SFC_EVQ_TYPE_MGMT
, 0, SFC_MGMT_EVQ_ENTRIES
,
893 sa
->socket_id
, &sa
->mgmt_evq
);
895 goto fail_mgmt_evq_init
;
898 * Rx/Tx event queues are created/destroyed when corresponding
899 * Rx/Tx queue is created/destroyed.
906 fail_kvarg_perf_profile
:
907 sfc_log_init(sa
, "failed %d", rc
);
912 sfc_ev_detach(struct sfc_adapter
*sa
)
914 sfc_log_init(sa
, "entry");
916 sfc_ev_qfini(sa
->mgmt_evq
);
918 if (sa
->evq_count
!= 0)
919 sfc_err(sa
, "%u EvQs are not destroyed before detach",