/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2017 Cavium, Inc
 */

#include <inttypes.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#include <rte_common.h>
#include <rte_eal.h>
#include <rte_ethdev_driver.h>
#include <rte_event_eth_rx_adapter.h>
#include <rte_kvargs.h>
#include <rte_lcore.h>
#include <rte_log.h>
#include <rte_malloc.h>
#include <rte_memory.h>
#include <rte_bus_vdev.h>

#include "ssovf_evdev.h"
#include "timvf_evdev.h"

int otx_logtype_ssovf;
/* int, not uint8_t: the kvargs handler ssovf_selftest() writes an int
 * through this pointer.
 */
static int timvf_enable_stats;

RTE_INIT(otx_ssovf_init_log)
{
	otx_logtype_ssovf = rte_log_register("pmd.event.octeontx");
	if (otx_logtype_ssovf >= 0)
		rte_log_set_level(otx_logtype_ssovf, RTE_LOG_NOTICE);
}

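/*
 * Usage note: with the "pmd.event.octeontx" logtype registered above, the
 * default NOTICE level can be raised at run time from the EAL command
 * line, e.g. (exact option syntax depends on the DPDK release in use):
 *
 *   --log-level=pmd.event.octeontx,debug
 */
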
/* SSOPF Mailbox messages */

struct ssovf_mbox_dev_info {
	uint64_t min_deq_timeout_ns;
	uint64_t max_deq_timeout_ns;
	uint32_t max_num_events;
};

static int
ssovf_mbox_dev_info(struct ssovf_mbox_dev_info *info)
{
	struct octeontx_mbox_hdr hdr = {0};
	uint16_t len = sizeof(struct ssovf_mbox_dev_info);

	hdr.coproc = SSO_COPROC;
	hdr.msg = SSO_GET_DEV_INFO;
	hdr.vfid = 0;

	memset(info, 0, len);
	return octeontx_mbox_send(&hdr, NULL, 0, info, len);
}

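/*
 * All the SSO PF mailbox helpers in this file follow the same shape: fill
 * an octeontx_mbox_hdr with the target coprocessor and message id, then
 * call octeontx_mbox_send(&hdr, txmsg, txlen, rxmsg, rxlen). Judging by
 * the callers here, it returns the number of response bytes on success
 * (rxlen for a full response) and a negative value on failure.
 */
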
struct ssovf_mbox_getwork_wait {
	uint64_t wait_ns;
};

static int
ssovf_mbox_getwork_tmo_set(uint32_t timeout_ns)
{
	struct octeontx_mbox_hdr hdr = {0};
	struct ssovf_mbox_getwork_wait tmo_set;
	uint16_t len = sizeof(struct ssovf_mbox_getwork_wait);
	int ret;

	hdr.coproc = SSO_COPROC;
	hdr.msg = SSO_SET_GETWORK_WAIT;
	hdr.vfid = 0;

	tmo_set.wait_ns = timeout_ns;
	ret = octeontx_mbox_send(&hdr, &tmo_set, len, NULL, 0);
	if (ret)
		ssovf_log_err("Failed to set getwork timeout(%d)", ret);

	return ret;
}

struct ssovf_mbox_grp_pri {
	uint8_t vhgrp_id;
	uint8_t wgt_left; /* Read only */
	uint8_t weight; /* Reserved */
	uint8_t affinity; /* Reserved */
	uint8_t priority;
};

static int
ssovf_mbox_priority_set(uint8_t queue, uint8_t prio)
{
	struct octeontx_mbox_hdr hdr = {0};
	struct ssovf_mbox_grp_pri grp;
	uint16_t len = sizeof(struct ssovf_mbox_grp_pri);
	int ret;

	hdr.coproc = SSO_COPROC;
	hdr.msg = SSO_GRP_SET_PRIORITY;
	hdr.vfid = queue;

	grp.vhgrp_id = queue;
	grp.weight = 0xff;
	grp.affinity = 0xff;
	grp.priority = prio / 32; /* Normalize to 0 to 7 */

	ret = octeontx_mbox_send(&hdr, &grp, len, NULL, 0);
	if (ret)
		ssovf_log_err("Failed to set grp=%d prio=%d", queue, prio);

	return ret;
}

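/*
 * Worked example for the prio / 32 normalization above: eventdev
 * priorities span 0 (highest) to 255 (lowest) while the SSO group offers
 * eight levels, so
 *   RTE_EVENT_DEV_PRIORITY_HIGHEST (0)   -> group priority 0
 *   RTE_EVENT_DEV_PRIORITY_NORMAL  (128) -> group priority 4
 *   RTE_EVENT_DEV_PRIORITY_LOWEST  (255) -> group priority 7
 */
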
struct ssovf_mbox_convert_ns_getworks_iter {
	uint64_t wait_ns;
	uint32_t getwork_iter; /* Get_work iterations for the given wait_ns */
};

static int
ssovf_mbox_timeout_ticks(uint64_t ns, uint64_t *tmo_ticks)
{
	struct octeontx_mbox_hdr hdr = {0};
	struct ssovf_mbox_convert_ns_getworks_iter ns2iter;
	uint16_t len = sizeof(ns2iter);
	int ret;

	hdr.coproc = SSO_COPROC;
	hdr.msg = SSO_CONVERT_NS_GETWORK_ITER;
	hdr.vfid = 0;

	memset(&ns2iter, 0, len);
	ns2iter.wait_ns = ns;
	ret = octeontx_mbox_send(&hdr, &ns2iter, len, &ns2iter, len);
	if (ret < 0 || (ret != len)) {
		ssovf_log_err("Failed to get tmo ticks ns=%"PRId64"", ns);
		return -EIO;
	}

	*tmo_ticks = ns2iter.getwork_iter;
	return 0;
}

static void
ssovf_info_get(struct rte_eventdev *dev, struct rte_event_dev_info *dev_info)
{
	struct ssovf_evdev *edev = ssovf_pmd_priv(dev);

	dev_info->driver_name = RTE_STR(EVENTDEV_NAME_OCTEONTX_PMD);
	dev_info->min_dequeue_timeout_ns = edev->min_deq_timeout_ns;
	dev_info->max_dequeue_timeout_ns = edev->max_deq_timeout_ns;
	dev_info->max_event_queues = edev->max_event_queues;
	dev_info->max_event_queue_flows = (1ULL << 20);
	dev_info->max_event_queue_priority_levels = 8;
	dev_info->max_event_priority_levels = 1;
	dev_info->max_event_ports = edev->max_event_ports;
	dev_info->max_event_port_dequeue_depth = 1;
	dev_info->max_event_port_enqueue_depth = 1;
	dev_info->max_num_events = edev->max_num_events;
	dev_info->event_dev_cap = RTE_EVENT_DEV_CAP_QUEUE_QOS |
					RTE_EVENT_DEV_CAP_DISTRIBUTED_SCHED |
					RTE_EVENT_DEV_CAP_QUEUE_ALL_TYPES |
					RTE_EVENT_DEV_CAP_RUNTIME_PORT_LINK |
					RTE_EVENT_DEV_CAP_MULTIPLE_QUEUE_PORT |
					RTE_EVENT_DEV_CAP_NONSEQ_MODE;
}

static int
ssovf_configure(const struct rte_eventdev *dev)
{
	struct rte_event_dev_config *conf = &dev->data->dev_conf;
	struct ssovf_evdev *edev = ssovf_pmd_priv(dev);
	uint64_t deq_tmo_ns;

	ssovf_func_trace();
	deq_tmo_ns = conf->dequeue_timeout_ns;
	if (deq_tmo_ns == 0)
		deq_tmo_ns = edev->min_deq_timeout_ns;

	if (conf->event_dev_cfg & RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT) {
		edev->is_timeout_deq = 1;
		deq_tmo_ns = edev->min_deq_timeout_ns;
	}
	edev->nb_event_queues = conf->nb_event_queues;
	edev->nb_event_ports = conf->nb_event_ports;

	return ssovf_mbox_getwork_tmo_set(deq_tmo_ns);
}

static void
ssovf_queue_def_conf(struct rte_eventdev *dev, uint8_t queue_id,
				 struct rte_event_queue_conf *queue_conf)
{
	RTE_SET_USED(dev);
	RTE_SET_USED(queue_id);

	queue_conf->nb_atomic_flows = (1ULL << 20);
	queue_conf->nb_atomic_order_sequences = (1ULL << 20);
	queue_conf->event_queue_cfg = RTE_EVENT_QUEUE_CFG_ALL_TYPES;
	queue_conf->priority = RTE_EVENT_DEV_PRIORITY_NORMAL;
}

static void
ssovf_queue_release(struct rte_eventdev *dev, uint8_t queue_id)
{
	RTE_SET_USED(dev);
	RTE_SET_USED(queue_id);
}

static int
ssovf_queue_setup(struct rte_eventdev *dev, uint8_t queue_id,
				const struct rte_event_queue_conf *queue_conf)
{
	RTE_SET_USED(dev);
	ssovf_func_trace("queue=%d prio=%d", queue_id, queue_conf->priority);

	return ssovf_mbox_priority_set(queue_id, queue_conf->priority);
}

static void
ssovf_port_def_conf(struct rte_eventdev *dev, uint8_t port_id,
				 struct rte_event_port_conf *port_conf)
{
	struct ssovf_evdev *edev = ssovf_pmd_priv(dev);

	RTE_SET_USED(port_id);
	port_conf->new_event_threshold = edev->max_num_events;
	port_conf->dequeue_depth = 1;
	port_conf->enqueue_depth = 1;
	port_conf->disable_implicit_release = 0;
}

static void
ssovf_port_release(void *port)
{
	rte_free(port);
}

static int
ssovf_port_setup(struct rte_eventdev *dev, uint8_t port_id,
				const struct rte_event_port_conf *port_conf)
{
	struct ssows *ws;
	uint32_t reg_off;
	uint8_t q;
	struct ssovf_evdev *edev = ssovf_pmd_priv(dev);

	ssovf_func_trace("port=%d", port_id);
	RTE_SET_USED(port_conf);

	/* Free memory prior to re-allocation if needed */
	if (dev->data->ports[port_id] != NULL) {
		ssovf_port_release(dev->data->ports[port_id]);
		dev->data->ports[port_id] = NULL;
	}

	/* Allocate event port memory */
	ws = rte_zmalloc_socket("eventdev ssows",
			sizeof(struct ssows), RTE_CACHE_LINE_SIZE,
			dev->data->socket_id);
	if (ws == NULL) {
		ssovf_log_err("Failed to alloc memory for port=%d", port_id);
		return -ENOMEM;
	}

	ws->base = ssovf_bar(OCTEONTX_SSO_HWS, port_id, 0);
	if (ws->base == NULL) {
		rte_free(ws);
		ssovf_log_err("Failed to get hws base addr port=%d", port_id);
		return -EINVAL;
	}

	reg_off = SSOW_VHWS_OP_GET_WORK0;
	reg_off |= 1 << 4; /* Index_ggrp_mask (Use maskset zero) */
	reg_off |= 1 << 16; /* Wait */
	ws->getwork = ws->base + reg_off;
	ws->port = port_id;
	ws->lookup_mem = octeontx_fastpath_lookup_mem_get();

	for (q = 0; q < edev->nb_event_queues; q++) {
		ws->grps[q] = ssovf_bar(OCTEONTX_SSO_GROUP, q, 2);
		if (ws->grps[q] == NULL) {
			rte_free(ws);
			ssovf_log_err("Failed to get grp%d base addr", q);
			return -EINVAL;
		}
	}

	dev->data->ports[port_id] = ws;
	ssovf_log_dbg("port=%d ws=%p", port_id, ws);
	return 0;
}

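/*
 * The ws->getwork address computed above encodes the dequeue operation
 * itself: the SSOW_VHWS_OP_GET_WORK0 offset selects the op, bit 4 picks
 * maskset zero and bit 16 the waiting form, so the fast path can fetch
 * the next event with a single 64-bit load from this one address.
 */
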
static int
ssovf_port_link(struct rte_eventdev *dev, void *port, const uint8_t queues[],
		const uint8_t priorities[], uint16_t nb_links)
{
	uint16_t link;
	uint64_t val;
	struct ssows *ws = port;

	ssovf_func_trace("port=%d nb_links=%d", ws->port, nb_links);
	RTE_SET_USED(dev);
	RTE_SET_USED(priorities);

	for (link = 0; link < nb_links; link++) {
		val = queues[link];
		val |= (1ULL << 24); /* Set membership */
		ssovf_write64(val, ws->base + SSOW_VHWS_GRPMSK_CHGX(0));
	}
	return (int)nb_links;
}

static int
ssovf_port_unlink(struct rte_eventdev *dev, void *port, uint8_t queues[],
			uint16_t nb_unlinks)
{
	uint16_t unlink;
	uint64_t val;
	struct ssows *ws = port;

	ssovf_func_trace("port=%d nb_unlinks=%d", ws->port, nb_unlinks);
	RTE_SET_USED(dev);

	for (unlink = 0; unlink < nb_unlinks; unlink++) {
		val = queues[unlink];
		val &= ~(1ULL << 24); /* Clear membership */
		ssovf_write64(val, ws->base + SSOW_VHWS_GRPMSK_CHGX(0));
	}
	return (int)nb_unlinks;
}

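/*
 * Illustration of the SSOW_VHWS_GRPMSK_CHGX value used by link/unlink
 * above: the low byte carries the queue (SSO group) id and bit 24 selects
 * set vs. clear membership, so linking queue 3 writes 0x01000003 while
 * unlinking it writes 0x00000003.
 */
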
static int
ssovf_timeout_ticks(struct rte_eventdev *dev, uint64_t ns, uint64_t *tmo_ticks)
{
	RTE_SET_USED(dev);

	return ssovf_mbox_timeout_ticks(ns, tmo_ticks);
}

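/*
 * Caller-side sketch (hypothetical ids and timeout): on this PMD a "tick"
 * is a get_work iteration count, not a time unit, as produced by the
 * SSO_CONVERT_NS_GETWORK_ITER mailbox call above:
 *
 *   uint64_t ticks;
 *   if (rte_event_dequeue_timeout_ticks(dev_id, 10000, &ticks) == 0)
 *           (void)rte_event_dequeue_burst(dev_id, port_id, &ev, 1, ticks);
 */
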
static void
ssows_dump(struct ssows *ws, FILE *f)
{
	uint8_t *base = ws->base;
	uint64_t val;

	fprintf(f, "\t---------------port%d---------------\n", ws->port);
	val = ssovf_read64(base + SSOW_VHWS_TAG);
	fprintf(f, "\ttag=0x%x tt=%d head=%d tail=%d grp=%d index=%d tail=%d\n",
		(uint32_t)(val & 0xffffffff), (int)(val >> 32) & 0x3,
		(int)(val >> 34) & 0x1, (int)(val >> 35) & 0x1,
		(int)(val >> 36) & 0x3ff, (int)(val >> 48) & 0x3ff,
		(int)(val >> 63) & 0x1);

	val = ssovf_read64(base + SSOW_VHWS_WQP);
	fprintf(f, "\twqp=0x%"PRIx64"\n", val);

	val = ssovf_read64(base + SSOW_VHWS_LINKS);
	fprintf(f, "\tindex=%d valid=%d revlink=%d tail=%d head=%d grp=%d\n",
		(int)(val & 0x3ff), (int)(val >> 10) & 0x1,
		(int)(val >> 11) & 0x3ff, (int)(val >> 26) & 0x1,
		(int)(val >> 27) & 0x1, (int)(val >> 28) & 0x3ff);

	val = ssovf_read64(base + SSOW_VHWS_PENDTAG);
	fprintf(f, "\tptag=0x%x ptt=%d pgwi=%d pdesc=%d pgw=%d pgww=%d ps=%d\n",
		(uint32_t)(val & 0xffffffff), (int)(val >> 32) & 0x3,
		(int)(val >> 56) & 0x1, (int)(val >> 58) & 0x1,
		(int)(val >> 61) & 0x1, (int)(val >> 62) & 0x1,
		(int)(val >> 63) & 0x1);

	val = ssovf_read64(base + SSOW_VHWS_PENDWQP);
	fprintf(f, "\tpwqp=0x%"PRIx64"\n", val);
}

static int
ssovf_eth_rx_adapter_caps_get(const struct rte_eventdev *dev,
		const struct rte_eth_dev *eth_dev, uint32_t *caps)
{
	int ret;

	RTE_SET_USED(dev);

	ret = strncmp(eth_dev->data->name, "eth_octeontx", 12);
	if (ret)
		*caps = RTE_EVENT_ETH_RX_ADAPTER_SW_CAP;
	else
		*caps = RTE_EVENT_ETH_RX_ADAPTER_CAP_INTERNAL_PORT;

	return 0;
}

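/*
 * Background: RTE_EVENT_ETH_RX_ADAPTER_CAP_INTERNAL_PORT advertises that
 * the octeontx ethdev can inject packets into the SSO in hardware, so the
 * Rx adapter needs no service-core based software enqueue; for any other
 * ethdev the generic RTE_EVENT_ETH_RX_ADAPTER_SW_CAP path applies.
 */
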
static int
ssovf_eth_rx_adapter_queue_add(const struct rte_eventdev *dev,
		const struct rte_eth_dev *eth_dev, int32_t rx_queue_id,
		const struct rte_event_eth_rx_adapter_queue_conf *queue_conf)
{
	int ret = 0;
	const struct octeontx_nic *nic = eth_dev->data->dev_private;
	struct ssovf_evdev *edev = ssovf_pmd_priv(dev);
	pki_mod_qos_t pki_qos;

	ret = strncmp(eth_dev->data->name, "eth_octeontx", 12);
	if (ret)
		return -EINVAL;

	if (rx_queue_id >= 0)
		return -EINVAL;

	if (queue_conf->ev.sched_type == RTE_SCHED_TYPE_PARALLEL)
		return -ENOTSUP;

	memset(&pki_qos, 0, sizeof(pki_mod_qos_t));

	pki_qos.port_type = 0;
	pki_qos.index = 0;
	pki_qos.mmask.f_tag_type = 1;
	pki_qos.mmask.f_port_add = 1;
	pki_qos.mmask.f_grp_ok = 1;
	pki_qos.mmask.f_grp_bad = 1;
	pki_qos.mmask.f_grptag_ok = 1;
	pki_qos.mmask.f_grptag_bad = 1;

	pki_qos.qos_entry.tag_type = queue_conf->ev.sched_type;
	pki_qos.qos_entry.port_add = 0;
	pki_qos.qos_entry.ggrp_ok = queue_conf->ev.queue_id;
	pki_qos.qos_entry.ggrp_bad = queue_conf->ev.queue_id;
	pki_qos.qos_entry.grptag_bad = 0;
	pki_qos.qos_entry.grptag_ok = 0;

	ret = octeontx_pki_port_modify_qos(nic->port_id, &pki_qos);
	if (ret < 0)
		ssovf_log_err("failed to modify QOS, port=%d, q=%d",
				nic->port_id, queue_conf->ev.queue_id);

	edev->rx_offload_flags = nic->rx_offload_flags;
	edev->tx_offload_flags = nic->tx_offload_flags;
	return ret;
}

static int
ssovf_eth_rx_adapter_queue_del(const struct rte_eventdev *dev,
		const struct rte_eth_dev *eth_dev, int32_t rx_queue_id)
{
	int ret = 0;
	const struct octeontx_nic *nic = eth_dev->data->dev_private;
	pki_del_qos_t pki_qos;

	RTE_SET_USED(dev);

	ret = strncmp(eth_dev->data->name, "eth_octeontx", 12);
	if (ret)
		return -EINVAL;

	/* Zero the request before filling it; clearing it afterwards would
	 * wipe the fields just assigned.
	 */
	memset(&pki_qos, 0, sizeof(pki_del_qos_t));
	pki_qos.port_type = 0;
	pki_qos.index = 0;
	ret = octeontx_pki_port_delete_qos(nic->port_id, &pki_qos);
	if (ret < 0)
		ssovf_log_err("Failed to delete QOS port=%d, q=%d",
				nic->port_id, rx_queue_id);
	return ret;
}

static int
ssovf_eth_rx_adapter_start(const struct rte_eventdev *dev,
					const struct rte_eth_dev *eth_dev)
{
	RTE_SET_USED(dev);
	RTE_SET_USED(eth_dev);

	return 0;
}

static int
ssovf_eth_rx_adapter_stop(const struct rte_eventdev *dev,
		const struct rte_eth_dev *eth_dev)
{
	RTE_SET_USED(dev);
	RTE_SET_USED(eth_dev);

	return 0;
}

static int
ssovf_eth_tx_adapter_caps_get(const struct rte_eventdev *dev,
		const struct rte_eth_dev *eth_dev, uint32_t *caps)
{
	int ret;

	RTE_SET_USED(dev);

	ret = strncmp(eth_dev->data->name, "eth_octeontx", 12);
	if (ret)
		*caps = 0;
	else
		*caps = RTE_EVENT_ETH_TX_ADAPTER_CAP_INTERNAL_PORT;

	return 0;
}

static int
ssovf_eth_tx_adapter_create(uint8_t id, const struct rte_eventdev *dev)
{
	RTE_SET_USED(id);
	RTE_SET_USED(dev);
	return 0;
}

static int
ssovf_eth_tx_adapter_free(uint8_t id, const struct rte_eventdev *dev)
{
	RTE_SET_USED(id);
	RTE_SET_USED(dev);
	return 0;
}

static int
ssovf_eth_tx_adapter_queue_add(uint8_t id, const struct rte_eventdev *dev,
		const struct rte_eth_dev *eth_dev, int32_t tx_queue_id)
{
	RTE_SET_USED(id);
	RTE_SET_USED(dev);
	RTE_SET_USED(eth_dev);
	RTE_SET_USED(tx_queue_id);
	return 0;
}

static int
ssovf_eth_tx_adapter_queue_del(uint8_t id, const struct rte_eventdev *dev,
		const struct rte_eth_dev *eth_dev, int32_t tx_queue_id)
{
	RTE_SET_USED(id);
	RTE_SET_USED(dev);
	RTE_SET_USED(eth_dev);
	RTE_SET_USED(tx_queue_id);
	return 0;
}

static int
ssovf_eth_tx_adapter_start(uint8_t id, const struct rte_eventdev *dev)
{
	RTE_SET_USED(id);
	RTE_SET_USED(dev);
	return 0;
}

static int
ssovf_eth_tx_adapter_stop(uint8_t id, const struct rte_eventdev *dev)
{
	RTE_SET_USED(id);
	RTE_SET_USED(dev);
	return 0;
}

static void
ssovf_dump(struct rte_eventdev *dev, FILE *f)
{
	struct ssovf_evdev *edev = ssovf_pmd_priv(dev);
	uint8_t port;

	/* Dump SSOWVF debug registers */
	for (port = 0; port < edev->nb_event_ports; port++)
		ssows_dump(dev->data->ports[port], f);
}

static int
ssovf_start(struct rte_eventdev *dev)
{
	struct ssovf_evdev *edev = ssovf_pmd_priv(dev);
	struct ssows *ws;
	uint8_t *base;
	uint8_t i;

	ssovf_func_trace();
	for (i = 0; i < edev->nb_event_ports; i++) {
		ws = dev->data->ports[i];
		ssows_reset(ws);
		ws->swtag_req = 0;
	}

	for (i = 0; i < edev->nb_event_queues; i++) {
		/* Consume all the events through HWS0 */
		ssows_flush_events(dev->data->ports[0], i, NULL, NULL);

		base = ssovf_bar(OCTEONTX_SSO_GROUP, i, 0);
		base += SSO_VHGRP_QCTL;
		ssovf_write64(1, base); /* Enable SSO group */
	}

	ssovf_fastpath_fns_set(dev);
	return 0;
}

static void
ssows_handle_event(void *arg, struct rte_event event)
{
	struct rte_eventdev *dev = arg;

	if (dev->dev_ops->dev_stop_flush != NULL)
		dev->dev_ops->dev_stop_flush(dev->data->dev_id, event,
				dev->data->dev_stop_flush_arg);
}

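/*
 * The helper above forwards each event flushed at stop time to the
 * application's dev_stop_flush handler, registered through the standard
 * eventdev API, e.g. (sketch):
 *
 *   rte_event_dev_stop_flush_callback_register(dev_id, cb, cb_arg);
 */
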
static void
ssovf_stop(struct rte_eventdev *dev)
{
	struct ssovf_evdev *edev = ssovf_pmd_priv(dev);
	struct ssows *ws;
	uint8_t *base;
	uint8_t i;

	ssovf_func_trace();
	for (i = 0; i < edev->nb_event_ports; i++) {
		ws = dev->data->ports[i];
		ssows_reset(ws);
		ws->swtag_req = 0;
	}

	for (i = 0; i < edev->nb_event_queues; i++) {
		/* Consume all the events through HWS0 */
		ssows_flush_events(dev->data->ports[0], i,
				ssows_handle_event, dev);

		base = ssovf_bar(OCTEONTX_SSO_GROUP, i, 0);
		base += SSO_VHGRP_QCTL;
		ssovf_write64(0, base); /* Disable SSO group */
	}
}

static int
ssovf_close(struct rte_eventdev *dev)
{
	struct ssovf_evdev *edev = ssovf_pmd_priv(dev);
	uint8_t all_queues[RTE_EVENT_MAX_QUEUES_PER_DEV];
	uint8_t i;

	for (i = 0; i < edev->nb_event_queues; i++)
		all_queues[i] = i;

	for (i = 0; i < edev->nb_event_ports; i++)
		ssovf_port_unlink(dev, dev->data->ports[i], all_queues,
			edev->nb_event_queues);
	return 0;
}

static int
ssovf_selftest(const char *key __rte_unused, const char *value,
		void *opaque)
{
	int *flag = opaque;

	*flag = !!atoi(value);
	return 0;
}

static int
ssovf_timvf_caps_get(const struct rte_eventdev *dev, uint64_t flags,
		uint32_t *caps, const struct rte_event_timer_adapter_ops **ops)
{
	return timvf_timer_adapter_caps_get(dev, flags, caps, ops,
			timvf_enable_stats);
}

/* Initialize and register event driver with DPDK Application */
static struct rte_eventdev_ops ssovf_ops = {
	.dev_infos_get    = ssovf_info_get,
	.dev_configure    = ssovf_configure,
	.queue_def_conf   = ssovf_queue_def_conf,
	.queue_setup      = ssovf_queue_setup,
	.queue_release    = ssovf_queue_release,
	.port_def_conf    = ssovf_port_def_conf,
	.port_setup       = ssovf_port_setup,
	.port_release     = ssovf_port_release,
	.port_link        = ssovf_port_link,
	.port_unlink      = ssovf_port_unlink,
	.timeout_ticks    = ssovf_timeout_ticks,

	.eth_rx_adapter_caps_get  = ssovf_eth_rx_adapter_caps_get,
	.eth_rx_adapter_queue_add = ssovf_eth_rx_adapter_queue_add,
	.eth_rx_adapter_queue_del = ssovf_eth_rx_adapter_queue_del,
	.eth_rx_adapter_start     = ssovf_eth_rx_adapter_start,
	.eth_rx_adapter_stop      = ssovf_eth_rx_adapter_stop,

	.eth_tx_adapter_caps_get  = ssovf_eth_tx_adapter_caps_get,
	.eth_tx_adapter_create    = ssovf_eth_tx_adapter_create,
	.eth_tx_adapter_free      = ssovf_eth_tx_adapter_free,
	.eth_tx_adapter_queue_add = ssovf_eth_tx_adapter_queue_add,
	.eth_tx_adapter_queue_del = ssovf_eth_tx_adapter_queue_del,
	.eth_tx_adapter_start     = ssovf_eth_tx_adapter_start,
	.eth_tx_adapter_stop      = ssovf_eth_tx_adapter_stop,

	.timer_adapter_caps_get   = ssovf_timvf_caps_get,

	.dev_selftest     = test_eventdev_octeontx,

	.dump             = ssovf_dump,
	.dev_start        = ssovf_start,
	.dev_stop         = ssovf_stop,
	.dev_close        = ssovf_close
};

*vdev
)
701 struct ssovf_info oinfo
;
702 struct ssovf_mbox_dev_info info
;
703 struct ssovf_evdev
*edev
;
704 struct rte_eventdev
*eventdev
;
705 static int ssovf_init_once
;
711 static const char *const args
[] = {
713 TIMVF_ENABLE_STATS_ARG
,
717 name
= rte_vdev_device_name(vdev
);
718 /* More than one instance is not supported */
719 if (ssovf_init_once
) {
720 ssovf_log_err("Request to create >1 %s instance", name
);
724 params
= rte_vdev_device_args(vdev
);
725 if (params
!= NULL
&& params
[0] != '\0') {
726 struct rte_kvargs
*kvlist
= rte_kvargs_parse(params
, args
);
730 "Ignoring unsupported params supplied '%s'",
733 int ret
= rte_kvargs_process(kvlist
,
735 ssovf_selftest
, &selftest
);
737 ssovf_log_err("%s: Error in selftest", name
);
738 rte_kvargs_free(kvlist
);
742 ret
= rte_kvargs_process(kvlist
,
743 TIMVF_ENABLE_STATS_ARG
,
744 ssovf_selftest
, &timvf_enable_stats
);
746 ssovf_log_err("%s: Error in timvf stats", name
);
747 rte_kvargs_free(kvlist
);
752 rte_kvargs_free(kvlist
);
755 eventdev
= rte_event_pmd_vdev_init(name
, sizeof(struct ssovf_evdev
),
757 if (eventdev
== NULL
) {
758 ssovf_log_err("Failed to create eventdev vdev %s", name
);
761 eventdev
->dev_ops
= &ssovf_ops
;
763 /* For secondary processes, the primary has done all the work */
764 if (rte_eal_process_type() != RTE_PROC_PRIMARY
) {
765 ssovf_fastpath_fns_set(eventdev
);
769 octeontx_mbox_init();
770 ret
= ssovf_info(&oinfo
);
772 ssovf_log_err("Failed to probe and validate ssovfs %d", ret
);
776 edev
= ssovf_pmd_priv(eventdev
);
777 edev
->max_event_ports
= oinfo
.total_ssowvfs
;
778 edev
->max_event_queues
= oinfo
.total_ssovfs
;
779 edev
->is_timeout_deq
= 0;
781 ret
= ssovf_mbox_dev_info(&info
);
782 if (ret
< 0 || ret
!= sizeof(struct ssovf_mbox_dev_info
)) {
783 ssovf_log_err("Failed to get mbox devinfo %d", ret
);
787 edev
->min_deq_timeout_ns
= info
.min_deq_timeout_ns
;
788 edev
->max_deq_timeout_ns
= info
.max_deq_timeout_ns
;
789 edev
->max_num_events
= info
.max_num_events
;
790 ssovf_log_dbg("min_deq_tmo=%"PRId64
" max_deq_tmo=%"PRId64
" max_evts=%d",
791 info
.min_deq_timeout_ns
, info
.max_deq_timeout_ns
,
792 info
.max_num_events
);
794 if (!edev
->max_event_ports
|| !edev
->max_event_queues
) {
795 ssovf_log_err("Not enough eventdev resource queues=%d ports=%d",
796 edev
->max_event_queues
, edev
->max_event_ports
);
801 ssovf_log_info("Initializing %s domain=%d max_queues=%d max_ports=%d",
802 name
, oinfo
.domain
, edev
->max_event_queues
,
803 edev
->max_event_ports
);
807 test_eventdev_octeontx();
811 rte_event_pmd_vdev_uninit(name
);
static int
ssovf_vdev_remove(struct rte_vdev_device *vdev)
{
	const char *name;

	name = rte_vdev_device_name(vdev);
	ssovf_log_info("Closing %s", name);
	return rte_event_pmd_vdev_uninit(name);
}

static struct rte_vdev_driver vdev_ssovf_pmd = {
	.probe = ssovf_vdev_probe,
	.remove = ssovf_vdev_remove
};

RTE_PMD_REGISTER_VDEV(EVENTDEV_NAME_OCTEONTX_PMD, vdev_ssovf_pmd);