1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2017-2018 Intel Corporation.
11 #include <rte_memzone.h>
12 #include <rte_memory.h>
14 #include <rte_errno.h>
15 #include <rte_malloc.h>
17 #include <rte_mempool.h>
18 #include <rte_common.h>
19 #include <rte_timer.h>
20 #include <rte_service_component.h>
21 #include <rte_cycles.h>
23 #include "rte_eventdev.h"
24 #include "rte_eventdev_pmd.h"
25 #include "rte_event_timer_adapter.h"
26 #include "rte_event_timer_adapter_pmd.h"
28 #define DATA_MZ_NAME_MAX_LEN 64
29 #define DATA_MZ_NAME_FORMAT "rte_event_timer_adapter_data_%d"
31 static int evtim_logtype
;
32 static int evtim_svc_logtype
;
33 static int evtim_buffer_logtype
;
35 static struct rte_event_timer_adapter adapters
[RTE_EVENT_TIMER_ADAPTER_NUM_MAX
];
37 static const struct rte_event_timer_adapter_ops sw_event_adapter_timer_ops
;
39 #define EVTIM_LOG(level, logtype, ...) \
40 rte_log(RTE_LOG_ ## level, logtype, \
41 RTE_FMT("EVTIMER: %s() line %u: " RTE_FMT_HEAD(__VA_ARGS__,) \
42 "\n", __func__, __LINE__, RTE_FMT_TAIL(__VA_ARGS__,)))
44 #define EVTIM_LOG_ERR(...) EVTIM_LOG(ERR, evtim_logtype, __VA_ARGS__)
/* Debug logging compiles away to no-ops unless eventdev debug is enabled. */
#ifdef RTE_LIBRTE_EVENTDEV_DEBUG
#define EVTIM_LOG_DBG(...) \
	EVTIM_LOG(DEBUG, evtim_logtype, __VA_ARGS__)
#define EVTIM_BUF_LOG_DBG(...) \
	EVTIM_LOG(DEBUG, evtim_buffer_logtype, __VA_ARGS__)
#define EVTIM_SVC_LOG_DBG(...) \
	EVTIM_LOG(DEBUG, evtim_svc_logtype, __VA_ARGS__)
#else
#define EVTIM_LOG_DBG(...) (void)0
#define EVTIM_BUF_LOG_DBG(...) (void)0
#define EVTIM_SVC_LOG_DBG(...) (void)0
#endif
60 default_port_conf_cb(uint16_t id
, uint8_t event_dev_id
, uint8_t *event_port_id
,
63 struct rte_event_timer_adapter
*adapter
;
64 struct rte_eventdev
*dev
;
65 struct rte_event_dev_config dev_conf
;
66 struct rte_event_port_conf
*port_conf
, def_port_conf
= {0};
72 RTE_SET_USED(event_dev_id
);
74 adapter
= &adapters
[id
];
75 dev
= &rte_eventdevs
[adapter
->data
->event_dev_id
];
76 dev_id
= dev
->data
->dev_id
;
77 dev_conf
= dev
->data
->dev_conf
;
79 started
= dev
->data
->dev_started
;
81 rte_event_dev_stop(dev_id
);
83 port_id
= dev_conf
.nb_event_ports
;
84 dev_conf
.nb_event_ports
+= 1;
85 ret
= rte_event_dev_configure(dev_id
, &dev_conf
);
87 EVTIM_LOG_ERR("failed to configure event dev %u\n", dev_id
);
89 if (rte_event_dev_start(dev_id
))
98 port_conf
= &def_port_conf
;
99 ret
= rte_event_port_default_conf_get(dev_id
, port_id
,
105 ret
= rte_event_port_setup(dev_id
, port_id
, port_conf
);
107 EVTIM_LOG_ERR("failed to setup event port %u on event dev %u\n",
112 *event_port_id
= port_id
;
115 ret
= rte_event_dev_start(dev_id
);
120 struct rte_event_timer_adapter
*
121 rte_event_timer_adapter_create(const struct rte_event_timer_adapter_conf
*conf
)
123 return rte_event_timer_adapter_create_ext(conf
, default_port_conf_cb
,
127 struct rte_event_timer_adapter
*
128 rte_event_timer_adapter_create_ext(
129 const struct rte_event_timer_adapter_conf
*conf
,
130 rte_event_timer_adapter_port_conf_cb_t conf_cb
,
134 struct rte_event_timer_adapter
*adapter
;
135 const struct rte_memzone
*mz
;
136 char mz_name
[DATA_MZ_NAME_MAX_LEN
];
138 struct rte_eventdev
*dev
;
145 /* Check eventdev ID */
146 if (!rte_event_pmd_is_valid_dev(conf
->event_dev_id
)) {
150 dev
= &rte_eventdevs
[conf
->event_dev_id
];
152 adapter_id
= conf
->timer_adapter_id
;
154 /* Check that adapter_id is in range */
155 if (adapter_id
>= RTE_EVENT_TIMER_ADAPTER_NUM_MAX
) {
160 /* Check adapter ID not already allocated */
161 adapter
= &adapters
[adapter_id
];
162 if (adapter
->allocated
) {
167 /* Create shared data area. */
168 n
= snprintf(mz_name
, sizeof(mz_name
), DATA_MZ_NAME_FORMAT
, adapter_id
);
169 if (n
>= (int)sizeof(mz_name
)) {
173 mz
= rte_memzone_reserve(mz_name
,
174 sizeof(struct rte_event_timer_adapter_data
),
177 /* rte_errno set by rte_memzone_reserve */
180 adapter
->data
= mz
->addr
;
181 memset(adapter
->data
, 0, sizeof(struct rte_event_timer_adapter_data
));
183 adapter
->data
->mz
= mz
;
184 adapter
->data
->event_dev_id
= conf
->event_dev_id
;
185 adapter
->data
->id
= adapter_id
;
186 adapter
->data
->socket_id
= conf
->socket_id
;
187 adapter
->data
->conf
= *conf
; /* copy conf structure */
189 /* Query eventdev PMD for timer adapter capabilities and ops */
190 ret
= dev
->dev_ops
->timer_adapter_caps_get(dev
,
191 adapter
->data
->conf
.flags
,
192 &adapter
->data
->caps
,
199 if (!(adapter
->data
->caps
&
200 RTE_EVENT_TIMER_ADAPTER_CAP_INTERNAL_PORT
)) {
201 FUNC_PTR_OR_NULL_RET_WITH_ERRNO(conf_cb
, -EINVAL
);
202 ret
= conf_cb(adapter
->data
->id
, adapter
->data
->event_dev_id
,
203 &adapter
->data
->event_port_id
, conf_arg
);
210 /* If eventdev PMD did not provide ops, use default software
213 if (adapter
->ops
== NULL
)
214 adapter
->ops
= &sw_event_adapter_timer_ops
;
216 /* Allow driver to do some setup */
217 FUNC_PTR_OR_NULL_RET_WITH_ERRNO(adapter
->ops
->init
, -ENOTSUP
);
218 ret
= adapter
->ops
->init(adapter
);
224 /* Set fast-path function pointers */
225 adapter
->arm_burst
= adapter
->ops
->arm_burst
;
226 adapter
->arm_tmo_tick_burst
= adapter
->ops
->arm_tmo_tick_burst
;
227 adapter
->cancel_burst
= adapter
->ops
->cancel_burst
;
229 adapter
->allocated
= 1;
234 rte_memzone_free(adapter
->data
->mz
);
239 rte_event_timer_adapter_get_info(const struct rte_event_timer_adapter
*adapter
,
240 struct rte_event_timer_adapter_info
*adapter_info
)
242 ADAPTER_VALID_OR_ERR_RET(adapter
, -EINVAL
);
244 if (adapter
->ops
->get_info
)
245 /* let driver set values it knows */
246 adapter
->ops
->get_info(adapter
, adapter_info
);
248 /* Set common values */
249 adapter_info
->conf
= adapter
->data
->conf
;
250 adapter_info
->event_dev_port_id
= adapter
->data
->event_port_id
;
251 adapter_info
->caps
= adapter
->data
->caps
;
257 rte_event_timer_adapter_start(const struct rte_event_timer_adapter
*adapter
)
261 ADAPTER_VALID_OR_ERR_RET(adapter
, -EINVAL
);
262 FUNC_PTR_OR_ERR_RET(adapter
->ops
->start
, -EINVAL
);
264 if (adapter
->data
->started
) {
265 EVTIM_LOG_ERR("event timer adapter %"PRIu8
" already started",
270 ret
= adapter
->ops
->start(adapter
);
274 adapter
->data
->started
= 1;
280 rte_event_timer_adapter_stop(const struct rte_event_timer_adapter
*adapter
)
284 ADAPTER_VALID_OR_ERR_RET(adapter
, -EINVAL
);
285 FUNC_PTR_OR_ERR_RET(adapter
->ops
->stop
, -EINVAL
);
287 if (adapter
->data
->started
== 0) {
288 EVTIM_LOG_ERR("event timer adapter %"PRIu8
" already stopped",
293 ret
= adapter
->ops
->stop(adapter
);
297 adapter
->data
->started
= 0;
302 struct rte_event_timer_adapter
*
303 rte_event_timer_adapter_lookup(uint16_t adapter_id
)
305 char name
[DATA_MZ_NAME_MAX_LEN
];
306 const struct rte_memzone
*mz
;
307 struct rte_event_timer_adapter_data
*data
;
308 struct rte_event_timer_adapter
*adapter
;
310 struct rte_eventdev
*dev
;
312 if (adapters
[adapter_id
].allocated
)
313 return &adapters
[adapter_id
]; /* Adapter is already loaded */
315 snprintf(name
, DATA_MZ_NAME_MAX_LEN
, DATA_MZ_NAME_FORMAT
, adapter_id
);
316 mz
= rte_memzone_lookup(name
);
324 adapter
= &adapters
[data
->id
];
325 adapter
->data
= data
;
327 dev
= &rte_eventdevs
[adapter
->data
->event_dev_id
];
329 /* Query eventdev PMD for timer adapter capabilities and ops */
330 ret
= dev
->dev_ops
->timer_adapter_caps_get(dev
,
331 adapter
->data
->conf
.flags
,
332 &adapter
->data
->caps
,
339 /* If eventdev PMD did not provide ops, use default software
342 if (adapter
->ops
== NULL
)
343 adapter
->ops
= &sw_event_adapter_timer_ops
;
345 /* Set fast-path function pointers */
346 adapter
->arm_burst
= adapter
->ops
->arm_burst
;
347 adapter
->arm_tmo_tick_burst
= adapter
->ops
->arm_tmo_tick_burst
;
348 adapter
->cancel_burst
= adapter
->ops
->cancel_burst
;
350 adapter
->allocated
= 1;
356 rte_event_timer_adapter_free(struct rte_event_timer_adapter
*adapter
)
360 ADAPTER_VALID_OR_ERR_RET(adapter
, -EINVAL
);
361 FUNC_PTR_OR_ERR_RET(adapter
->ops
->uninit
, -EINVAL
);
363 if (adapter
->data
->started
== 1) {
364 EVTIM_LOG_ERR("event timer adapter %"PRIu8
" must be stopped "
365 "before freeing", adapter
->data
->id
);
369 /* free impl priv data */
370 ret
= adapter
->ops
->uninit(adapter
);
374 /* free shared data area */
375 ret
= rte_memzone_free(adapter
->data
->mz
);
379 adapter
->data
= NULL
;
380 adapter
->allocated
= 0;
386 rte_event_timer_adapter_service_id_get(struct rte_event_timer_adapter
*adapter
,
387 uint32_t *service_id
)
389 ADAPTER_VALID_OR_ERR_RET(adapter
, -EINVAL
);
391 if (adapter
->data
->service_inited
&& service_id
!= NULL
)
392 *service_id
= adapter
->data
->service_id
;
394 return adapter
->data
->service_inited
? 0 : -ESRCH
;
398 rte_event_timer_adapter_stats_get(struct rte_event_timer_adapter
*adapter
,
399 struct rte_event_timer_adapter_stats
*stats
)
401 ADAPTER_VALID_OR_ERR_RET(adapter
, -EINVAL
);
402 FUNC_PTR_OR_ERR_RET(adapter
->ops
->stats_get
, -EINVAL
);
406 return adapter
->ops
->stats_get(adapter
, stats
);
410 rte_event_timer_adapter_stats_reset(struct rte_event_timer_adapter
*adapter
)
412 ADAPTER_VALID_OR_ERR_RET(adapter
, -EINVAL
);
413 FUNC_PTR_OR_ERR_RET(adapter
->ops
->stats_reset
, -EINVAL
);
414 return adapter
->ops
->stats_reset(adapter
);
418 * Software event timer adapter buffer helper functions
421 #define NSECPERSEC 1E9
423 /* Optimizations used to index into the buffer require that the buffer size
426 #define EVENT_BUFFER_SZ 4096
427 #define EVENT_BUFFER_BATCHSZ 32
428 #define EVENT_BUFFER_MASK (EVENT_BUFFER_SZ - 1)
430 struct event_buffer
{
433 struct rte_event events
[EVENT_BUFFER_SZ
];
434 } __rte_cache_aligned
;
437 event_buffer_full(struct event_buffer
*bufp
)
439 return (bufp
->head
- bufp
->tail
) == EVENT_BUFFER_SZ
;
443 event_buffer_batch_ready(struct event_buffer
*bufp
)
445 return (bufp
->head
- bufp
->tail
) >= EVENT_BUFFER_BATCHSZ
;
449 event_buffer_init(struct event_buffer
*bufp
)
451 bufp
->head
= bufp
->tail
= 0;
452 memset(&bufp
->events
, 0, sizeof(struct rte_event
) * EVENT_BUFFER_SZ
);
456 event_buffer_add(struct event_buffer
*bufp
, struct rte_event
*eventp
)
459 struct rte_event
*buf_eventp
;
461 if (event_buffer_full(bufp
))
464 /* Instead of modulus, bitwise AND with mask to get head_idx. */
465 head_idx
= bufp
->head
& EVENT_BUFFER_MASK
;
466 buf_eventp
= &bufp
->events
[head_idx
];
467 rte_memcpy(buf_eventp
, eventp
, sizeof(struct rte_event
));
469 /* Wrap automatically when overflow occurs. */
476 event_buffer_flush(struct event_buffer
*bufp
, uint8_t dev_id
, uint8_t port_id
,
477 uint16_t *nb_events_flushed
,
478 uint16_t *nb_events_inv
)
480 uint16_t head_idx
, tail_idx
, n
= 0;
481 struct rte_event
*events
= bufp
->events
;
483 /* Instead of modulus, bitwise AND with mask to get index. */
484 head_idx
= bufp
->head
& EVENT_BUFFER_MASK
;
485 tail_idx
= bufp
->tail
& EVENT_BUFFER_MASK
;
487 /* Determine the largest contigous run we can attempt to enqueue to the
490 if (head_idx
> tail_idx
)
491 n
= head_idx
- tail_idx
;
492 else if (head_idx
< tail_idx
)
493 n
= EVENT_BUFFER_SZ
- tail_idx
;
495 *nb_events_flushed
= 0;
500 *nb_events_flushed
= rte_event_enqueue_burst(dev_id
, port_id
,
501 &events
[tail_idx
], n
);
502 if (*nb_events_flushed
!= n
&& rte_errno
== -EINVAL
) {
503 EVTIM_LOG_ERR("failed to enqueue invalid event - dropping it");
507 bufp
->tail
= bufp
->tail
+ *nb_events_flushed
+ *nb_events_inv
;
511 * Software event timer adapter implementation
514 struct rte_event_timer_adapter_sw_data
{
515 /* List of messages for outstanding timers */
516 TAILQ_HEAD(, msg
) msgs_tailq_head
;
517 /* Lock to guard tailq and armed count */
518 rte_spinlock_t msgs_tailq_sl
;
519 /* Identifier of service executing timer management logic. */
521 /* The cycle count at which the adapter should next tick */
522 uint64_t next_tick_cycles
;
523 /* Incremented as the service moves through phases of an iteration */
524 volatile int service_phase
;
525 /* The tick resolution used by adapter instance. May have been
526 * adjusted from what user requested
528 uint64_t timer_tick_ns
;
529 /* Maximum timeout in nanoseconds allowed by adapter instance. */
531 /* Ring containing messages to arm or cancel event timers */
532 struct rte_ring
*msg_ring
;
533 /* Mempool containing msg objects */
534 struct rte_mempool
*msg_pool
;
535 /* Buffered timer expiry events to be enqueued to an event device. */
536 struct event_buffer buffer
;
538 struct rte_event_timer_adapter_stats stats
;
539 /* The number of threads currently adding to the message ring */
540 rte_atomic16_t message_producer_count
;
543 enum msg_type
{MSG_TYPE_ARM
, MSG_TYPE_CANCEL
};
547 struct rte_event_timer
*evtim
;
548 struct rte_timer tim
;
549 TAILQ_ENTRY(msg
) msgs
;
553 sw_event_timer_cb(struct rte_timer
*tim
, void *arg
)
556 uint16_t nb_evs_flushed
= 0;
557 uint16_t nb_evs_invalid
= 0;
559 struct rte_event_timer
*evtim
;
560 struct rte_event_timer_adapter
*adapter
;
561 struct rte_event_timer_adapter_sw_data
*sw_data
;
564 opaque
= evtim
->impl_opaque
[1];
565 adapter
= (struct rte_event_timer_adapter
*)(uintptr_t)opaque
;
566 sw_data
= adapter
->data
->adapter_priv
;
568 ret
= event_buffer_add(&sw_data
->buffer
, &evtim
->ev
);
570 /* If event buffer is full, put timer back in list with
571 * immediate expiry value, so that we process it again on the
574 rte_timer_reset_sync(tim
, 0, SINGLE
, rte_lcore_id(),
575 sw_event_timer_cb
, evtim
);
577 sw_data
->stats
.evtim_retry_count
++;
578 EVTIM_LOG_DBG("event buffer full, resetting rte_timer with "
579 "immediate expiry value");
581 struct msg
*m
= container_of(tim
, struct msg
, tim
);
582 TAILQ_REMOVE(&sw_data
->msgs_tailq_head
, m
, msgs
);
583 EVTIM_BUF_LOG_DBG("buffered an event timer expiry event");
584 evtim
->state
= RTE_EVENT_TIMER_NOT_ARMED
;
586 /* Free the msg object containing the rte_timer now that
587 * we've buffered its event successfully.
589 rte_mempool_put(sw_data
->msg_pool
, m
);
591 /* Bump the count when we successfully add an expiry event to
594 sw_data
->stats
.evtim_exp_count
++;
597 if (event_buffer_batch_ready(&sw_data
->buffer
)) {
598 event_buffer_flush(&sw_data
->buffer
,
599 adapter
->data
->event_dev_id
,
600 adapter
->data
->event_port_id
,
604 sw_data
->stats
.ev_enq_count
+= nb_evs_flushed
;
605 sw_data
->stats
.ev_inv_count
+= nb_evs_invalid
;
609 static __rte_always_inline
uint64_t
610 get_timeout_cycles(struct rte_event_timer
*evtim
,
611 struct rte_event_timer_adapter
*adapter
)
614 struct rte_event_timer_adapter_sw_data
*sw_data
;
616 sw_data
= adapter
->data
->adapter_priv
;
617 timeout_ns
= evtim
->timeout_ticks
* sw_data
->timer_tick_ns
;
618 return timeout_ns
* rte_get_timer_hz() / NSECPERSEC
;
622 /* This function returns true if one or more (adapter) ticks have occurred since
623 * the last time it was called.
626 adapter_did_tick(struct rte_event_timer_adapter
*adapter
)
628 uint64_t cycles_per_adapter_tick
, start_cycles
;
629 uint64_t *next_tick_cyclesp
;
630 struct rte_event_timer_adapter_sw_data
*sw_data
;
632 sw_data
= adapter
->data
->adapter_priv
;
633 next_tick_cyclesp
= &sw_data
->next_tick_cycles
;
635 cycles_per_adapter_tick
= sw_data
->timer_tick_ns
*
636 (rte_get_timer_hz() / NSECPERSEC
);
638 start_cycles
= rte_get_timer_cycles();
640 /* Note: initially, *next_tick_cyclesp == 0, so the clause below will
641 * execute, and set things going.
644 if (start_cycles
>= *next_tick_cyclesp
) {
645 /* Snap the current cycle count to the preceding adapter tick
648 start_cycles
-= start_cycles
% cycles_per_adapter_tick
;
650 *next_tick_cyclesp
= start_cycles
+ cycles_per_adapter_tick
;
658 /* Check that event timer timeout value is in range */
659 static __rte_always_inline
int
660 check_timeout(struct rte_event_timer
*evtim
,
661 const struct rte_event_timer_adapter
*adapter
)
664 struct rte_event_timer_adapter_sw_data
*sw_data
;
666 sw_data
= adapter
->data
->adapter_priv
;
667 tmo_nsec
= evtim
->timeout_ticks
* sw_data
->timer_tick_ns
;
669 if (tmo_nsec
> sw_data
->max_tmo_ns
)
672 if (tmo_nsec
< sw_data
->timer_tick_ns
)
678 /* Check that event timer event queue sched type matches destination event queue
681 static __rte_always_inline
int
682 check_destination_event_queue(struct rte_event_timer
*evtim
,
683 const struct rte_event_timer_adapter
*adapter
)
688 ret
= rte_event_queue_attr_get(adapter
->data
->event_dev_id
,
690 RTE_EVENT_QUEUE_ATTR_SCHEDULE_TYPE
,
693 if ((ret
< 0 && ret
!= -EOVERFLOW
) ||
694 evtim
->ev
.sched_type
!= sched_type
)
702 sw_event_timer_adapter_service_func(void *arg
)
705 uint64_t cycles
, opaque
;
706 uint16_t nb_evs_flushed
= 0;
707 uint16_t nb_evs_invalid
= 0;
708 struct rte_event_timer_adapter
*adapter
;
709 struct rte_event_timer_adapter_sw_data
*sw_data
;
710 struct rte_event_timer
*evtim
= NULL
;
711 struct rte_timer
*tim
= NULL
;
712 struct msg
*msg
, *msgs
[NB_OBJS
];
715 sw_data
= adapter
->data
->adapter_priv
;
717 sw_data
->service_phase
= 1;
720 while (rte_atomic16_read(&sw_data
->message_producer_count
) > 0 ||
721 !rte_ring_empty(sw_data
->msg_ring
)) {
723 num_msgs
= rte_ring_dequeue_burst(sw_data
->msg_ring
,
724 (void **)msgs
, NB_OBJS
, NULL
);
726 for (i
= 0; i
< num_msgs
; i
++) {
736 EVTIM_SVC_LOG_DBG("dequeued ARM message from "
740 cycles
= get_timeout_cycles(evtim
,
742 ret
= rte_timer_reset(tim
, cycles
, SINGLE
,
746 RTE_ASSERT(ret
== 0);
748 evtim
->impl_opaque
[0] = (uintptr_t)tim
;
749 evtim
->impl_opaque
[1] = (uintptr_t)adapter
;
751 TAILQ_INSERT_TAIL(&sw_data
->msgs_tailq_head
,
755 case MSG_TYPE_CANCEL
:
756 EVTIM_SVC_LOG_DBG("dequeued CANCEL message "
758 opaque
= evtim
->impl_opaque
[0];
759 tim
= (struct rte_timer
*)(uintptr_t)opaque
;
760 RTE_ASSERT(tim
!= NULL
);
762 ret
= rte_timer_stop(tim
);
763 RTE_ASSERT(ret
== 0);
765 /* Free the msg object for the original arm
769 m
= container_of(tim
, struct msg
, tim
);
770 TAILQ_REMOVE(&sw_data
->msgs_tailq_head
, m
,
772 rte_mempool_put(sw_data
->msg_pool
, m
);
774 /* Free the msg object for the current msg */
775 rte_mempool_put(sw_data
->msg_pool
, msg
);
777 evtim
->impl_opaque
[0] = 0;
778 evtim
->impl_opaque
[1] = 0;
785 sw_data
->service_phase
= 2;
788 if (adapter_did_tick(adapter
)) {
791 event_buffer_flush(&sw_data
->buffer
,
792 adapter
->data
->event_dev_id
,
793 adapter
->data
->event_port_id
,
794 &nb_evs_flushed
, &nb_evs_invalid
);
796 sw_data
->stats
.ev_enq_count
+= nb_evs_flushed
;
797 sw_data
->stats
.ev_inv_count
+= nb_evs_invalid
;
798 sw_data
->stats
.adapter_tick_count
++;
801 sw_data
->service_phase
= 0;
807 /* The adapter initialization function rounds the mempool size up to the next
808 * power of 2, so we can take the difference between that value and what the
809 * user requested, and use the space for caches. This avoids a scenario where a
810 * user can't arm the number of timers the adapter was configured with because
811 * mempool objects have been lost to caches.
813 * nb_actual should always be a power of 2, so we can iterate over the powers
814 * of 2 to see what the largest cache size we can use is.
817 compute_msg_mempool_cache_size(uint64_t nb_requested
, uint64_t nb_actual
)
826 if (RTE_MAX_LCORE
* size
< (int)(nb_actual
- nb_requested
) &&
827 size
< RTE_MEMPOOL_CACHE_MAX_SIZE
&&
828 size
<= nb_actual
/ 1.5)
837 #define SW_MIN_INTERVAL 1E5
840 sw_event_timer_adapter_init(struct rte_event_timer_adapter
*adapter
)
843 struct rte_event_timer_adapter_sw_data
*sw_data
;
846 struct rte_service_spec service
;
847 static bool timer_subsystem_inited
; // static initialized to false
849 /* Allocate storage for SW implementation data */
850 char priv_data_name
[RTE_RING_NAMESIZE
];
851 snprintf(priv_data_name
, RTE_RING_NAMESIZE
, "sw_evtim_adap_priv_%"PRIu8
,
853 adapter
->data
->adapter_priv
= rte_zmalloc_socket(
855 sizeof(struct rte_event_timer_adapter_sw_data
),
857 adapter
->data
->socket_id
);
858 if (adapter
->data
->adapter_priv
== NULL
) {
859 EVTIM_LOG_ERR("failed to allocate space for private data");
864 if (adapter
->data
->conf
.timer_tick_ns
< SW_MIN_INTERVAL
) {
865 EVTIM_LOG_ERR("failed to create adapter with requested tick "
871 sw_data
= adapter
->data
->adapter_priv
;
873 sw_data
->timer_tick_ns
= adapter
->data
->conf
.timer_tick_ns
;
874 sw_data
->max_tmo_ns
= adapter
->data
->conf
.max_tmo_ns
;
876 TAILQ_INIT(&sw_data
->msgs_tailq_head
);
877 rte_spinlock_init(&sw_data
->msgs_tailq_sl
);
878 rte_atomic16_init(&sw_data
->message_producer_count
);
880 /* Rings require power of 2, so round up to next such value */
881 nb_timers
= rte_align64pow2(adapter
->data
->conf
.nb_timers
);
883 char msg_ring_name
[RTE_RING_NAMESIZE
];
884 snprintf(msg_ring_name
, RTE_RING_NAMESIZE
,
885 "sw_evtim_adap_msg_ring_%"PRIu8
, adapter
->data
->id
);
886 flags
= adapter
->data
->conf
.flags
& RTE_EVENT_TIMER_ADAPTER_F_SP_PUT
?
887 RING_F_SP_ENQ
| RING_F_SC_DEQ
:
889 sw_data
->msg_ring
= rte_ring_create(msg_ring_name
, nb_timers
,
890 adapter
->data
->socket_id
, flags
);
891 if (sw_data
->msg_ring
== NULL
) {
892 EVTIM_LOG_ERR("failed to create message ring");
897 char pool_name
[RTE_RING_NAMESIZE
];
898 snprintf(pool_name
, RTE_RING_NAMESIZE
, "sw_evtim_adap_msg_pool_%"PRIu8
,
901 /* Both the arming/canceling thread and the service thread will do puts
902 * to the mempool, but if the SP_PUT flag is enabled, we can specify
903 * single-consumer get for the mempool.
905 flags
= adapter
->data
->conf
.flags
& RTE_EVENT_TIMER_ADAPTER_F_SP_PUT
?
906 MEMPOOL_F_SC_GET
: 0;
908 /* The usable size of a ring is count - 1, so subtract one here to
909 * make the counts agree.
911 int pool_size
= nb_timers
- 1;
912 int cache_size
= compute_msg_mempool_cache_size(
913 adapter
->data
->conf
.nb_timers
, nb_timers
);
914 sw_data
->msg_pool
= rte_mempool_create(pool_name
, pool_size
,
915 sizeof(struct msg
), cache_size
,
916 0, NULL
, NULL
, NULL
, NULL
,
917 adapter
->data
->socket_id
, flags
);
918 if (sw_data
->msg_pool
== NULL
) {
919 EVTIM_LOG_ERR("failed to create message object mempool");
924 event_buffer_init(&sw_data
->buffer
);
926 /* Register a service component to run adapter logic */
927 memset(&service
, 0, sizeof(service
));
928 snprintf(service
.name
, RTE_SERVICE_NAME_MAX
,
929 "sw_evimer_adap_svc_%"PRIu8
, adapter
->data
->id
);
930 service
.socket_id
= adapter
->data
->socket_id
;
931 service
.callback
= sw_event_timer_adapter_service_func
;
932 service
.callback_userdata
= adapter
;
933 service
.capabilities
&= ~(RTE_SERVICE_CAP_MT_SAFE
);
934 ret
= rte_service_component_register(&service
, &sw_data
->service_id
);
936 EVTIM_LOG_ERR("failed to register service %s with id %"PRIu32
937 ": err = %d", service
.name
, sw_data
->service_id
,
944 EVTIM_LOG_DBG("registered service %s with id %"PRIu32
, service
.name
,
945 sw_data
->service_id
);
947 adapter
->data
->service_id
= sw_data
->service_id
;
948 adapter
->data
->service_inited
= 1;
950 if (!timer_subsystem_inited
) {
951 rte_timer_subsystem_init();
952 timer_subsystem_inited
= true;
958 rte_mempool_free(sw_data
->msg_pool
);
960 rte_ring_free(sw_data
->msg_ring
);
967 sw_event_timer_adapter_uninit(struct rte_event_timer_adapter
*adapter
)
971 struct rte_event_timer_adapter_sw_data
*sw_data
=
972 adapter
->data
->adapter_priv
;
974 rte_spinlock_lock(&sw_data
->msgs_tailq_sl
);
976 /* Cancel outstanding rte_timers and free msg objects */
977 m1
= TAILQ_FIRST(&sw_data
->msgs_tailq_head
);
979 EVTIM_LOG_DBG("freeing outstanding timer");
980 m2
= TAILQ_NEXT(m1
, msgs
);
982 rte_timer_stop_sync(&m1
->tim
);
983 rte_mempool_put(sw_data
->msg_pool
, m1
);
988 rte_spinlock_unlock(&sw_data
->msgs_tailq_sl
);
990 ret
= rte_service_component_unregister(sw_data
->service_id
);
992 EVTIM_LOG_ERR("failed to unregister service component");
996 rte_ring_free(sw_data
->msg_ring
);
997 rte_mempool_free(sw_data
->msg_pool
);
998 rte_free(adapter
->data
->adapter_priv
);
1003 static inline int32_t
1004 get_mapped_count_for_service(uint32_t service_id
)
1006 int32_t core_count
, i
, mapped_count
= 0;
1007 uint32_t lcore_arr
[RTE_MAX_LCORE
];
1009 core_count
= rte_service_lcore_list(lcore_arr
, RTE_MAX_LCORE
);
1011 for (i
= 0; i
< core_count
; i
++)
1012 if (rte_service_map_lcore_get(service_id
, lcore_arr
[i
]) == 1)
1015 return mapped_count
;
1019 sw_event_timer_adapter_start(const struct rte_event_timer_adapter
*adapter
)
1022 struct rte_event_timer_adapter_sw_data
*sw_data
;
1024 sw_data
= adapter
->data
->adapter_priv
;
1026 /* Mapping the service to more than one service core can introduce
1027 * delays while one thread is waiting to acquire a lock, so only allow
1028 * one core to be mapped to the service.
1030 mapped_count
= get_mapped_count_for_service(sw_data
->service_id
);
1032 if (mapped_count
== 1)
1033 return rte_service_component_runstate_set(sw_data
->service_id
,
1036 return mapped_count
< 1 ? -ENOENT
: -ENOTSUP
;
1040 sw_event_timer_adapter_stop(const struct rte_event_timer_adapter
*adapter
)
1043 struct rte_event_timer_adapter_sw_data
*sw_data
=
1044 adapter
->data
->adapter_priv
;
1046 ret
= rte_service_component_runstate_set(sw_data
->service_id
, 0);
1050 /* Wait for the service to complete its final iteration before
1053 while (sw_data
->service_phase
!= 0)
1062 sw_event_timer_adapter_get_info(const struct rte_event_timer_adapter
*adapter
,
1063 struct rte_event_timer_adapter_info
*adapter_info
)
1065 struct rte_event_timer_adapter_sw_data
*sw_data
;
1066 sw_data
= adapter
->data
->adapter_priv
;
1068 adapter_info
->min_resolution_ns
= sw_data
->timer_tick_ns
;
1069 adapter_info
->max_tmo_ns
= sw_data
->max_tmo_ns
;
1073 sw_event_timer_adapter_stats_get(const struct rte_event_timer_adapter
*adapter
,
1074 struct rte_event_timer_adapter_stats
*stats
)
1076 struct rte_event_timer_adapter_sw_data
*sw_data
;
1077 sw_data
= adapter
->data
->adapter_priv
;
1078 *stats
= sw_data
->stats
;
1083 sw_event_timer_adapter_stats_reset(
1084 const struct rte_event_timer_adapter
*adapter
)
1086 struct rte_event_timer_adapter_sw_data
*sw_data
;
1087 sw_data
= adapter
->data
->adapter_priv
;
1088 memset(&sw_data
->stats
, 0, sizeof(sw_data
->stats
));
1092 static __rte_always_inline
uint16_t
1093 __sw_event_timer_arm_burst(const struct rte_event_timer_adapter
*adapter
,
1094 struct rte_event_timer
**evtims
,
1099 struct rte_event_timer_adapter_sw_data
*sw_data
;
1100 struct msg
*msgs
[nb_evtims
];
1102 #ifdef RTE_LIBRTE_EVENTDEV_DEBUG
1103 /* Check that the service is running. */
1104 if (rte_service_runstate_get(adapter
->data
->service_id
) != 1) {
1110 sw_data
= adapter
->data
->adapter_priv
;
1112 ret
= rte_mempool_get_bulk(sw_data
->msg_pool
, (void **)msgs
, nb_evtims
);
1118 /* Let the service know we're producing messages for it to process */
1119 rte_atomic16_inc(&sw_data
->message_producer_count
);
1121 /* If the service is managing timers, wait for it to finish */
1122 while (sw_data
->service_phase
== 2)
1127 for (i
= 0; i
< nb_evtims
; i
++) {
1128 /* Don't modify the event timer state in these cases */
1129 if (evtims
[i
]->state
== RTE_EVENT_TIMER_ARMED
) {
1130 rte_errno
= EALREADY
;
1132 } else if (!(evtims
[i
]->state
== RTE_EVENT_TIMER_NOT_ARMED
||
1133 evtims
[i
]->state
== RTE_EVENT_TIMER_CANCELED
)) {
1138 ret
= check_timeout(evtims
[i
], adapter
);
1140 evtims
[i
]->state
= RTE_EVENT_TIMER_ERROR_TOOLATE
;
1145 evtims
[i
]->state
= RTE_EVENT_TIMER_ERROR_TOOEARLY
;
1150 if (check_destination_event_queue(evtims
[i
], adapter
) < 0) {
1151 evtims
[i
]->state
= RTE_EVENT_TIMER_ERROR
;
1156 /* Checks passed, set up a message to enqueue */
1157 msgs
[i
]->type
= MSG_TYPE_ARM
;
1158 msgs
[i
]->evtim
= evtims
[i
];
1160 /* Set the payload pointer if not set. */
1161 if (evtims
[i
]->ev
.event_ptr
== NULL
)
1162 evtims
[i
]->ev
.event_ptr
= evtims
[i
];
1164 /* msg objects that get enqueued successfully will be freed
1165 * either by a future cancel operation or by the timer
1166 * expiration callback.
1168 if (rte_ring_enqueue(sw_data
->msg_ring
, msgs
[i
]) < 0) {
1173 EVTIM_LOG_DBG("enqueued ARM message to ring");
1175 evtims
[i
]->state
= RTE_EVENT_TIMER_ARMED
;
1178 /* Let the service know we're done producing messages */
1179 rte_atomic16_dec(&sw_data
->message_producer_count
);
1182 rte_mempool_put_bulk(sw_data
->msg_pool
, (void **)&msgs
[i
],
/* Public arm op: thin wrapper over the common arm path. */
static uint16_t
sw_event_timer_arm_burst(const struct rte_event_timer_adapter *adapter,
			 struct rte_event_timer **evtims,
			 uint16_t nb_evtims)
{
	return __sw_event_timer_arm_burst(adapter, evtims, nb_evtims);
}
1197 sw_event_timer_cancel_burst(const struct rte_event_timer_adapter
*adapter
,
1198 struct rte_event_timer
**evtims
,
1203 struct rte_event_timer_adapter_sw_data
*sw_data
;
1204 struct msg
*msgs
[nb_evtims
];
1206 #ifdef RTE_LIBRTE_EVENTDEV_DEBUG
1207 /* Check that the service is running. */
1208 if (rte_service_runstate_get(adapter
->data
->service_id
) != 1) {
1214 sw_data
= adapter
->data
->adapter_priv
;
1216 ret
= rte_mempool_get_bulk(sw_data
->msg_pool
, (void **)msgs
, nb_evtims
);
1222 /* Let the service know we're producing messages for it to process */
1223 rte_atomic16_inc(&sw_data
->message_producer_count
);
1225 /* If the service could be modifying event timer states, wait */
1226 while (sw_data
->service_phase
== 2)
1231 for (i
= 0; i
< nb_evtims
; i
++) {
1232 /* Don't modify the event timer state in these cases */
1233 if (evtims
[i
]->state
== RTE_EVENT_TIMER_CANCELED
) {
1234 rte_errno
= EALREADY
;
1236 } else if (evtims
[i
]->state
!= RTE_EVENT_TIMER_ARMED
) {
1241 msgs
[i
]->type
= MSG_TYPE_CANCEL
;
1242 msgs
[i
]->evtim
= evtims
[i
];
1244 if (rte_ring_enqueue(sw_data
->msg_ring
, msgs
[i
]) < 0) {
1249 EVTIM_LOG_DBG("enqueued CANCEL message to ring");
1251 evtims
[i
]->state
= RTE_EVENT_TIMER_CANCELED
;
1254 /* Let the service know we're done producing messages */
1255 rte_atomic16_dec(&sw_data
->message_producer_count
);
1258 rte_mempool_put_bulk(sw_data
->msg_pool
, (void **)&msgs
[i
],
1265 sw_event_timer_arm_tmo_tick_burst(const struct rte_event_timer_adapter
*adapter
,
1266 struct rte_event_timer
**evtims
,
1267 uint64_t timeout_ticks
,
1272 for (i
= 0; i
< nb_evtims
; i
++)
1273 evtims
[i
]->timeout_ticks
= timeout_ticks
;
1275 return __sw_event_timer_arm_burst(adapter
, evtims
, nb_evtims
);
1278 static const struct rte_event_timer_adapter_ops sw_event_adapter_timer_ops
= {
1279 .init
= sw_event_timer_adapter_init
,
1280 .uninit
= sw_event_timer_adapter_uninit
,
1281 .start
= sw_event_timer_adapter_start
,
1282 .stop
= sw_event_timer_adapter_stop
,
1283 .get_info
= sw_event_timer_adapter_get_info
,
1284 .stats_get
= sw_event_timer_adapter_stats_get
,
1285 .stats_reset
= sw_event_timer_adapter_stats_reset
,
1286 .arm_burst
= sw_event_timer_arm_burst
,
1287 .arm_tmo_tick_burst
= sw_event_timer_arm_tmo_tick_burst
,
1288 .cancel_burst
= sw_event_timer_cancel_burst
,
1291 RTE_INIT(event_timer_adapter_init_log
)
1293 evtim_logtype
= rte_log_register("lib.eventdev.adapter.timer");
1294 if (evtim_logtype
>= 0)
1295 rte_log_set_level(evtim_logtype
, RTE_LOG_NOTICE
);
1297 evtim_buffer_logtype
= rte_log_register("lib.eventdev.adapter.timer."
1299 if (evtim_buffer_logtype
>= 0)
1300 rte_log_set_level(evtim_buffer_logtype
, RTE_LOG_NOTICE
);
1302 evtim_svc_logtype
= rte_log_register("lib.eventdev.adapter.timer.svc");
1303 if (evtim_svc_logtype
>= 0)
1304 rte_log_set_level(evtim_svc_logtype
, RTE_LOG_NOTICE
);