/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2017 Cavium, Inc
 */

#ifndef _EVT_COMMON_
#define _EVT_COMMON_
#include <errno.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#include <rte_common.h>
#include <rte_eventdev.h>
#include <rte_service.h>
/* ANSI escape sequences used to colorize console output. */
#define CLNRM "\x1b[0m"  /* reset to normal */
#define CLRED "\x1b[31m" /* red */
#define CLGRN "\x1b[32m" /* green */
#define CLYEL "\x1b[33m" /* yellow */

/* Print an error message (red) prefixed with the calling function's name. */
#define evt_err(fmt, args...) \
	fprintf(stderr, CLRED"error: %s() "fmt CLNRM "\n", __func__, ## args)

/* Print an informational message (yellow). */
#define evt_info(fmt, args...) \
	fprintf(stdout, CLYEL""fmt CLNRM "\n", ## args)

/* Field width used to left-align the "name : value" dump output. */
#define EVT_STR_FMT 20

/* Dump one "name : value" line, aligned to EVT_STR_FMT columns. */
#define evt_dump(str, fmt, val...) \
	printf("\t%-*s : "fmt"\n", EVT_STR_FMT, str, ## val)

/* Open a "name : {" aggregate dump; terminate it with evt_dump_end. */
#define evt_dump_begin(str) printf("\t%-*s : {", EVT_STR_FMT, str)

/* Backspace over the trailing separator and close the aggregate dump. */
#define evt_dump_end printf("\b}\n")

/* Upper bounds used to size static tables across the test-eventdev app. */
#define EVT_MAX_STAGES 64
#define EVT_MAX_PORTS 256
#define EVT_MAX_QUEUES 256
/**
 * Type of producer feeding events into the event device under test.
 *
 * NOTE(review): the enum declaration and the NONE/MAX sentinels were lost in
 * this view; restored per the upstream header — confirm against the repo.
 */
enum evt_prod_type {
	EVT_PROD_TYPE_NONE,              /* No producer configured. */
	EVT_PROD_TYPE_SYNT,              /* Producer type Synthetic i.e. CPU. */
	EVT_PROD_TYPE_ETH_RX_ADPTR,      /* Producer type Eth Rx Adapter. */
	EVT_PROD_TYPE_EVENT_TIMER_ADPTR, /* Producer type Timer Adapter. */
	EVT_PROD_TYPE_MAX,               /* Number of producer types. */
};
46 #define EVT_TEST_NAME_MAX_LEN 32
47 char test_name
[EVT_TEST_NAME_MAX_LEN
];
48 bool plcores
[RTE_MAX_LCORE
];
49 bool wlcores
[RTE_MAX_LCORE
];
50 uint8_t sched_type_list
[EVT_MAX_STAGES
];
57 uint8_t nb_timer_adptrs
;
59 uint64_t timer_tick_nsec
;
60 uint64_t optm_timer_tick_nsec
;
61 uint64_t max_tmo_nsec
;
66 uint32_t fwd_latency
:1;
67 uint32_t q_priority
:1;
68 uint32_t deq_tmo_nsec
;
69 enum evt_prod_type prod_type
;
70 uint8_t timdev_use_burst
;
75 evt_has_distributed_sched(uint8_t dev_id
)
77 struct rte_event_dev_info dev_info
;
79 rte_event_dev_info_get(dev_id
, &dev_info
);
80 return (dev_info
.event_dev_cap
& RTE_EVENT_DEV_CAP_DISTRIBUTED_SCHED
) ?
85 evt_has_burst_mode(uint8_t dev_id
)
87 struct rte_event_dev_info dev_info
;
89 rte_event_dev_info_get(dev_id
, &dev_info
);
90 return (dev_info
.event_dev_cap
& RTE_EVENT_DEV_CAP_BURST_MODE
) ?
96 evt_has_all_types_queue(uint8_t dev_id
)
98 struct rte_event_dev_info dev_info
;
100 rte_event_dev_info_get(dev_id
, &dev_info
);
101 return (dev_info
.event_dev_cap
& RTE_EVENT_DEV_CAP_QUEUE_ALL_TYPES
) ?
106 evt_service_setup(uint32_t service_id
)
109 unsigned int lcore
= 0;
110 uint32_t core_array
[RTE_MAX_LCORE
];
112 uint8_t min_cnt
= UINT8_MAX
;
114 if (!rte_service_lcore_count())
117 core_cnt
= rte_service_lcore_list(core_array
,
121 /* Get the core which has least number of services running. */
123 /* Reset default mapping */
124 rte_service_map_lcore_set(service_id
,
125 core_array
[core_cnt
], 0);
126 cnt
= rte_service_lcore_count_services(
127 core_array
[core_cnt
]);
129 lcore
= core_array
[core_cnt
];
133 if (rte_service_map_lcore_set(service_id
, lcore
, 1))
140 evt_configure_eventdev(struct evt_options
*opt
, uint8_t nb_queues
,
143 struct rte_event_dev_info info
;
146 memset(&info
, 0, sizeof(struct rte_event_dev_info
));
147 ret
= rte_event_dev_info_get(opt
->dev_id
, &info
);
149 evt_err("failed to get eventdev info %d", opt
->dev_id
);
153 if (opt
->deq_tmo_nsec
) {
154 if (opt
->deq_tmo_nsec
< info
.min_dequeue_timeout_ns
) {
155 opt
->deq_tmo_nsec
= info
.min_dequeue_timeout_ns
;
156 evt_info("dequeue_timeout_ns too low, using %d",
159 if (opt
->deq_tmo_nsec
> info
.max_dequeue_timeout_ns
) {
160 opt
->deq_tmo_nsec
= info
.max_dequeue_timeout_ns
;
161 evt_info("dequeue_timeout_ns too high, using %d",
166 const struct rte_event_dev_config config
= {
167 .dequeue_timeout_ns
= opt
->deq_tmo_nsec
,
168 .nb_event_queues
= nb_queues
,
169 .nb_event_ports
= nb_ports
,
170 .nb_events_limit
= info
.max_num_events
,
171 .nb_event_queue_flows
= opt
->nb_flows
,
172 .nb_event_port_dequeue_depth
=
173 info
.max_event_port_dequeue_depth
,
174 .nb_event_port_enqueue_depth
=
175 info
.max_event_port_enqueue_depth
,
178 return rte_event_dev_configure(opt
->dev_id
, &config
);
181 #endif /* _EVT_COMMON_*/