/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(C) 2019 Marvell International Ltd.
 */

#include <stdbool.h>

#include "l3fwd.h"
#include "l3fwd_event.h"
static uint32_t
l3fwd_event_device_setup_internal_port(void)
{
	struct l3fwd_event_resources *evt_rsrc = l3fwd_get_eventdev_rsrc();
	struct rte_event_dev_config event_d_conf = {
		.nb_events_limit = 4096,
		.nb_event_queue_flows = 1024,
		.nb_event_port_dequeue_depth = 128,
		.nb_event_port_enqueue_depth = 128
	};
	struct rte_event_dev_info dev_info;
	const uint8_t event_d_id = 0; /* Always use first event device only */
	uint32_t event_queue_cfg = 0;
	uint16_t ethdev_count = 0;
	uint16_t num_workers = 0;
	uint16_t port_id;
	int ret;
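
	/* Count the ethdev ports enabled in the port mask; this bounds
	 * the number of event queues requested below.
	 */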
	RTE_ETH_FOREACH_DEV(port_id) {
		if ((evt_rsrc->port_mask & (1 << port_id)) == 0)
			continue;
		ethdev_count++;
	}
	/* Event device configuration */
	rte_event_dev_info_get(event_d_id, &dev_info);
	/* Enable implicit release */
	if (dev_info.event_dev_cap & RTE_EVENT_DEV_CAP_IMPLICIT_RELEASE_DISABLE)
		evt_rsrc->disable_implicit_release = 0;
	if (dev_info.event_dev_cap & RTE_EVENT_DEV_CAP_QUEUE_ALL_TYPES)
		event_queue_cfg |= RTE_EVENT_QUEUE_CFG_ALL_TYPES;
	event_d_conf.nb_event_queues = ethdev_count;
	if (dev_info.max_event_queues < event_d_conf.nb_event_queues)
		event_d_conf.nb_event_queues = dev_info.max_event_queues;

	if (dev_info.max_num_events < event_d_conf.nb_events_limit)
		event_d_conf.nb_events_limit = dev_info.max_num_events;

	if (dev_info.max_event_queue_flows < event_d_conf.nb_event_queue_flows)
		event_d_conf.nb_event_queue_flows =
					dev_info.max_event_queue_flows;

	if (dev_info.max_event_port_dequeue_depth <
				event_d_conf.nb_event_port_dequeue_depth)
		event_d_conf.nb_event_port_dequeue_depth =
				dev_info.max_event_port_dequeue_depth;

	if (dev_info.max_event_port_enqueue_depth <
				event_d_conf.nb_event_port_enqueue_depth)
		event_d_conf.nb_event_port_enqueue_depth =
				dev_info.max_event_port_enqueue_depth;
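
	/* One event port per lcore, capped at the device's port limit. */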
	num_workers = rte_lcore_count();
	if (dev_info.max_event_ports < num_workers)
		num_workers = dev_info.max_event_ports;
	event_d_conf.nb_event_ports = num_workers;
	evt_rsrc->evp.nb_ports = num_workers;
	evt_rsrc->evq.nb_queues = event_d_conf.nb_event_queues;
	evt_rsrc->has_burst = !!(dev_info.event_dev_cap &
				 RTE_EVENT_DEV_CAP_BURST_MODE);
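
	/* Apply the negotiated configuration to the event device. */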
	ret = rte_event_dev_configure(event_d_id, &event_d_conf);
	if (ret < 0)
		rte_panic("Error in configuring event device\n");
	evt_rsrc->event_d_id = event_d_id;
	return event_queue_cfg;
}
static void
l3fwd_event_port_setup_internal_port(void)
{
	struct l3fwd_event_resources *evt_rsrc = l3fwd_get_eventdev_rsrc();
	uint8_t event_d_id = evt_rsrc->event_d_id;
	struct rte_event_port_conf event_p_conf = {
		.dequeue_depth = 32,
		.enqueue_depth = 32,
		.new_event_threshold = 4096
	};
	struct rte_event_port_conf def_p_conf;
	uint8_t event_p_id;
	int32_t ret;

	evt_rsrc->evp.event_p_id = (uint8_t *)malloc(sizeof(uint8_t) *
					evt_rsrc->evp.nb_ports);
	if (!evt_rsrc->evp.event_p_id)
		rte_panic("Failed to allocate memory for Event Ports\n");
	ret = rte_event_port_default_conf_get(event_d_id, 0, &def_p_conf);
	if (ret < 0)
		rte_panic("Error to get default configuration of event port\n");
	if (def_p_conf.new_event_threshold < event_p_conf.new_event_threshold)
		event_p_conf.new_event_threshold =
					def_p_conf.new_event_threshold;
	if (def_p_conf.dequeue_depth < event_p_conf.dequeue_depth)
		event_p_conf.dequeue_depth = def_p_conf.dequeue_depth;

	if (def_p_conf.enqueue_depth < event_p_conf.enqueue_depth)
		event_p_conf.enqueue_depth = def_p_conf.enqueue_depth;
	event_p_conf.disable_implicit_release =
					evt_rsrc->disable_implicit_release;
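
	/* Configure each event port and link it to all event queues
	 * (NULL queue list links every queue).
	 */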
	for (event_p_id = 0; event_p_id < evt_rsrc->evp.nb_ports;
							event_p_id++) {
		ret = rte_event_port_setup(event_d_id, event_p_id,
					   &event_p_conf);
		if (ret < 0)
			rte_panic("Error in configuring event port %d\n",
				  event_p_id);

		ret = rte_event_port_link(event_d_id, event_p_id, NULL,
					  NULL, 0);
		if (ret < 0)
			rte_panic("Error in linking event port %d to queue\n",
				  event_p_id);
		evt_rsrc->evp.event_p_id[event_p_id] = event_p_id;
	}

	/* init spinlock */
	rte_spinlock_init(&evt_rsrc->evp.lock);

	evt_rsrc->def_p_conf = event_p_conf;
}
static void
l3fwd_event_queue_setup_internal_port(uint32_t event_queue_cfg)
{
	struct l3fwd_event_resources *evt_rsrc = l3fwd_get_eventdev_rsrc();
	uint8_t event_d_id = evt_rsrc->event_d_id;
	struct rte_event_queue_conf event_q_conf = {
		.nb_atomic_flows = 1024,
		.nb_atomic_order_sequences = 1024,
		.event_queue_cfg = event_queue_cfg,
		.priority = RTE_EVENT_DEV_PRIORITY_NORMAL
	};
	struct rte_event_queue_conf def_q_conf;
	uint8_t event_q_id = 0;
	int32_t ret;

	ret = rte_event_queue_default_conf_get(event_d_id, event_q_id,
					       &def_q_conf);
	if (ret < 0)
		rte_panic("Error to get default config of event queue\n");
	if (def_q_conf.nb_atomic_flows < event_q_conf.nb_atomic_flows)
		event_q_conf.nb_atomic_flows = def_q_conf.nb_atomic_flows;
	if (def_q_conf.nb_atomic_order_sequences <
				event_q_conf.nb_atomic_order_sequences)
		event_q_conf.nb_atomic_order_sequences =
				def_q_conf.nb_atomic_order_sequences;
	event_q_conf.event_queue_cfg = event_queue_cfg;
	event_q_conf.schedule_type = evt_rsrc->sched_type;
	evt_rsrc->evq.event_q_id = (uint8_t *)malloc(sizeof(uint8_t) *
					evt_rsrc->evq.nb_queues);
	if (!evt_rsrc->evq.event_q_id)
		rte_panic("Memory allocation failure\n");
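
	/* Configure each event queue and record its id. */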
	for (event_q_id = 0; event_q_id < evt_rsrc->evq.nb_queues;
							event_q_id++) {
		ret = rte_event_queue_setup(event_d_id, event_q_id,
					    &event_q_conf);
		if (ret < 0)
			rte_panic("Error in configuring event queue\n");
		evt_rsrc->evq.event_q_id[event_q_id] = event_q_id;
	}
}
static void
l3fwd_rx_tx_adapter_setup_internal_port(void)
{
	struct l3fwd_event_resources *evt_rsrc = l3fwd_get_eventdev_rsrc();
	struct rte_event_eth_rx_adapter_queue_conf eth_q_conf;
	uint8_t event_d_id = evt_rsrc->event_d_id;
	uint16_t adapter_id = 0;
	uint16_t nb_adapter = 0;
	uint16_t port_id;
	uint8_t q_id = 0;
	int ret;

	memset(&eth_q_conf, 0, sizeof(eth_q_conf));
	eth_q_conf.ev.priority = RTE_EVENT_DEV_PRIORITY_NORMAL;
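
	/* One Rx and one Tx adapter will be created per enabled port. */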
	RTE_ETH_FOREACH_DEV(port_id) {
		if ((evt_rsrc->port_mask & (1 << port_id)) == 0)
			continue;
		nb_adapter++;
	}
	evt_rsrc->rx_adptr.nb_rx_adptr = nb_adapter;
	evt_rsrc->rx_adptr.rx_adptr = (uint8_t *)malloc(sizeof(uint8_t) *
					evt_rsrc->rx_adptr.nb_rx_adptr);
	if (!evt_rsrc->rx_adptr.rx_adptr) {
		free(evt_rsrc->evp.event_p_id);
		free(evt_rsrc->evq.event_q_id);
		rte_panic("Failed to allocate memory for Rx adapter\n");
	}
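
	/* Create, populate and start one Rx adapter per enabled port. */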
	RTE_ETH_FOREACH_DEV(port_id) {
		if ((evt_rsrc->port_mask & (1 << port_id)) == 0)
			continue;
		ret = rte_event_eth_rx_adapter_create(adapter_id, event_d_id,
						      &evt_rsrc->def_p_conf);
		if (ret)
			rte_panic("Failed to create rx adapter[%d]\n",
				  adapter_id);

		/* Configure user requested sched type */
		eth_q_conf.ev.sched_type = evt_rsrc->sched_type;
		eth_q_conf.ev.queue_id = evt_rsrc->evq.event_q_id[q_id];
		ret = rte_event_eth_rx_adapter_queue_add(adapter_id, port_id,
							 -1, &eth_q_conf);
		if (ret)
			rte_panic("Failed to add queues to Rx adapter\n");

		ret = rte_event_eth_rx_adapter_start(adapter_id);
		if (ret)
			rte_panic("Rx adapter[%d] start Failed\n", adapter_id);

		evt_rsrc->rx_adptr.rx_adptr[adapter_id] = adapter_id;
		adapter_id++;
		if (q_id < evt_rsrc->evq.nb_queues)
			q_id++;
	}
	evt_rsrc->tx_adptr.nb_tx_adptr = nb_adapter;
	evt_rsrc->tx_adptr.tx_adptr = (uint8_t *)malloc(sizeof(uint8_t) *
					evt_rsrc->tx_adptr.nb_tx_adptr);
	if (!evt_rsrc->tx_adptr.tx_adptr) {
		free(evt_rsrc->rx_adptr.rx_adptr);
		free(evt_rsrc->evp.event_p_id);
		free(evt_rsrc->evq.event_q_id);
		rte_panic("Failed to allocate memory for Tx adapter\n");
	}
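
	/* Same pattern for Tx: one adapter per enabled port. */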
	adapter_id = 0;
	RTE_ETH_FOREACH_DEV(port_id) {
		if ((evt_rsrc->port_mask & (1 << port_id)) == 0)
			continue;
		ret = rte_event_eth_tx_adapter_create(adapter_id, event_d_id,
						      &evt_rsrc->def_p_conf);
		if (ret)
			rte_panic("Failed to create tx adapter[%d]\n",
				  adapter_id);

		ret = rte_event_eth_tx_adapter_queue_add(adapter_id, port_id,
							 -1);
		if (ret)
			rte_panic("Failed to add queues to Tx adapter\n");

		ret = rte_event_eth_tx_adapter_start(adapter_id);
		if (ret)
			rte_panic("Tx adapter[%d] start Failed\n", adapter_id);

		evt_rsrc->tx_adptr.tx_adptr[adapter_id] = adapter_id;
		adapter_id++;
	}
}
void
l3fwd_event_set_internal_port_ops(struct l3fwd_event_setup_ops *ops)
{
	ops->event_device_setup = l3fwd_event_device_setup_internal_port;
	ops->event_queue_setup = l3fwd_event_queue_setup_internal_port;
	ops->event_port_setup = l3fwd_event_port_setup_internal_port;
	ops->adapter_setup = l3fwd_rx_tx_adapter_setup_internal_port;
}
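
/*
 * Call-order sketch: these ops are invoked by the l3fwd event setup code
 * (the dispatcher is not shown in this file), roughly as
 *
 *	uint32_t event_queue_cfg = ops->event_device_setup();
 *	ops->event_queue_setup(event_queue_cfg);
 *	ops->event_port_setup();
 *	ops->adapter_setup();
 *
 * The device must be configured first so the queue/port counts cached in
 * the event resources are valid for the later stages.
 */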