1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2010-2018 Intel Corporation
11 #include <rte_errno.h>
12 #include <rte_lcore.h>
13 #include <rte_spinlock.h>
14 #include <rte_common.h>
15 #include <rte_interrupts.h>
17 #include "eal_private.h"
18 #include "eal_alarm_private.h"
20 #define MAX_INTR_EVENTS 16
/*
 * Scratch buffer used to drain a readable interrupt fd; which member is
 * used depends on the interrupt handle type.
 * NOTE(review): this definition appears truncated in this extraction
 * (no closing brace visible, only the fallback member) -- verify against
 * the complete file.
 */
23 * union buffer for reading on different devices
25 union rte_intr_read_buffer
{
26 char charbuf
[16]; /* for others */
/* List-head type for the callbacks attached to one interrupt source. */
29 TAILQ_HEAD(rte_intr_cb_list
, rte_intr_callback
);
/* List-head type for the global list of interrupt sources. */
30 TAILQ_HEAD(rte_intr_source_list
, rte_intr_source
);
/*
 * One user-registered interrupt callback. Instances are linked into a
 * source's callback list and protected by the global intr_lock.
 * NOTE(review): closing brace not visible in this extraction.
 */
32 struct rte_intr_callback
{
33 TAILQ_ENTRY(rte_intr_callback
) next
;
34 rte_intr_callback_fn cb_fn
; /**< callback address */
35 void *cb_arg
; /**< parameter for callback */
36 uint8_t pending_delete
; /**< delete after callback is called */
37 rte_intr_unregister_callback_fn ucb_fn
; /**< fn to call before cb is deleted */
/*
 * One interrupt source: an interrupt handle (keyed by its fd) together
 * with the list of callbacks registered for it. Linked into the global
 * intr_sources list.
 * NOTE(review): closing brace (and any further members, e.g. the "active"
 * flag referenced later in this file) not visible in this extraction.
 */
40 struct rte_intr_source
{
41 TAILQ_ENTRY(rte_intr_source
) next
;
42 struct rte_intr_handle intr_handle
; /**< interrupt handle */
43 struct rte_intr_cb_list callbacks
; /**< user callbacks */
47 /* global spinlock for interrupt data operation */
48 static rte_spinlock_t intr_lock
= RTE_SPINLOCK_INITIALIZER
;
50 /* interrupt sources list */
51 static struct rte_intr_source_list intr_sources
;
53 /* interrupt handling thread */
54 static pthread_t intr_thread
;
/*
 * Global kqueue descriptor shared by the registration API and the
 * interrupt thread; -1 while the subsystem is inactive. volatile because
 * it is read from multiple threads without a lock.
 */
56 static volatile int kq
= -1;
/*
 * Fill in a struct kevent describing @ih for kqueue registration:
 * alarm handles become one-shot EVFILT_TIMER events armed with the
 * soonest alarm timeout in nanoseconds (from eal_alarm_get_timeout_ns());
 * all other handle types become EVFILT_READ events on the handle's fd.
 * NOTE(review): return type, braces and return statements are missing
 * from this extraction; comments describe only the visible lines.
 */
59 intr_source_to_kevent(const struct rte_intr_handle
*ih
, struct kevent
*ke
)
61 /* alarm callbacks are special case */
62 if (ih
->type
== RTE_INTR_HANDLE_ALARM
) {
65 /* get soonest alarm timeout */
66 if (eal_alarm_get_timeout_ns(&timeout_ns
) < 0)
69 ke
->filter
= EVFILT_TIMER
;
70 /* timers are one shot */
71 ke
->flags
|= EV_ONESHOT
;
72 ke
->fflags
= NOTE_NSECONDS
;
73 ke
->data
= timeout_ns
;
/* non-alarm handles: wait for the fd to become readable */
75 ke
->filter
= EVFILT_READ
;
/*
 * Register callback @cb with argument @cb_arg for the interrupt described
 * by @intr_handle. Allocates a new rte_intr_callback, attaches it to the
 * existing source matching the handle's fd, or creates a new source if
 * none exists, and registers the fd (or alarm timer) with the global
 * kqueue. All list manipulation happens under intr_lock. On kevent()
 * failure the callback (and a newly created source) is unlinked again.
 * NOTE(review): several statements (returns, braces, error gotos) are
 * missing from this extraction; comments describe only the visible code.
 */
83 rte_intr_callback_register(const struct rte_intr_handle
*intr_handle
,
84 rte_intr_callback_fn cb
, void *cb_arg
)
86 struct rte_intr_callback
*callback
= NULL
;
87 struct rte_intr_source
*src
= NULL
;
90 /* first do parameter checking */
91 if (intr_handle
== NULL
|| intr_handle
->fd
< 0 || cb
== NULL
) {
93 "Registering with invalid input parameter\n");
/* registration requires the kqueue to have been created already */
97 RTE_LOG(ERR
, EAL
, "Kqueue is not active: %d\n", kq
);
101 /* allocate a new interrupt callback entity */
102 callback
= calloc(1, sizeof(*callback
));
103 if (callback
== NULL
) {
104 RTE_LOG(ERR
, EAL
, "Can not allocate memory\n");
107 callback
->cb_fn
= cb
;
108 callback
->cb_arg
= cb_arg
;
109 callback
->pending_delete
= 0;
110 callback
->ucb_fn
= NULL
;
112 rte_spinlock_lock(&intr_lock
);
114 /* check if there is at least one callback registered for the fd */
115 TAILQ_FOREACH(src
, &intr_sources
, next
) {
116 if (src
->intr_handle
.fd
== intr_handle
->fd
) {
117 /* we had no interrupts for this */
118 if (TAILQ_EMPTY(&src
->callbacks
))
121 TAILQ_INSERT_TAIL(&(src
->callbacks
), callback
, next
);
127 /* no existing callbacks for this - add new source */
129 src
= calloc(1, sizeof(*src
));
131 RTE_LOG(ERR
, EAL
, "Can not allocate memory\n");
/* copy the handle by value so the source owns its own copy */
135 src
->intr_handle
= *intr_handle
;
136 TAILQ_INIT(&src
->callbacks
);
137 TAILQ_INSERT_TAIL(&(src
->callbacks
), callback
, next
);
138 TAILQ_INSERT_TAIL(&intr_sources
, src
, next
);
144 /* add events to the queue. timer events are special as we need to
147 if (add_event
|| src
->intr_handle
.type
== RTE_INTR_HANDLE_ALARM
) {
150 memset(&ke
, 0, sizeof(ke
));
151 ke
.flags
= EV_ADD
; /* mark for addition to the queue */
153 if (intr_source_to_kevent(intr_handle
, &ke
) < 0) {
154 RTE_LOG(ERR
, EAL
, "Cannot convert interrupt handle to kevent\n");
160 * add the intr file descriptor into wait list.
162 if (kevent(kq
, &ke
, 1, NULL
, 0, NULL
) < 0) {
163 /* currently, nic_uio does not support interrupts, so
164 * this error will always be triggered and output to the
165 * user. so, don't output it unless debug log level set.
168 RTE_LOG(DEBUG
, EAL
, "Interrupt handle %d not supported\n",
169 src
->intr_handle
.fd
);
171 RTE_LOG(ERR
, EAL
, "Error adding fd %d "
179 rte_spinlock_unlock(&intr_lock
);
/* error path: undo the list insertions done above */
185 TAILQ_REMOVE(&(src
->callbacks
), callback
, next
);
186 if (TAILQ_EMPTY(&(src
->callbacks
))) {
187 TAILQ_REMOVE(&intr_sources
, src
, next
);
192 rte_spinlock_unlock(&intr_lock
);
/*
 * Mark matching callbacks of @intr_handle for deferred deletion instead
 * of removing them immediately (safe to call while the source is active,
 * e.g. from within a callback). A cb_arg of (void *)-1 matches any
 * argument. @ucb_fn is recorded (per the visible pending_delete flag) to
 * be invoked when the callback is actually removed by the interrupt
 * thread. NOTE(review): return statements and some braces are missing
 * from this extraction; comments describe only the visible code.
 */
196 int __rte_experimental
197 rte_intr_callback_unregister_pending(const struct rte_intr_handle
*intr_handle
,
198 rte_intr_callback_fn cb_fn
, void *cb_arg
,
199 rte_intr_unregister_callback_fn ucb_fn
)
202 struct rte_intr_source
*src
;
203 struct rte_intr_callback
*cb
, *next
;
205 /* do parameter checking first */
206 if (intr_handle
== NULL
|| intr_handle
->fd
< 0) {
208 "Unregistering with invalid input parameter\n");
213 RTE_LOG(ERR
, EAL
, "Kqueue is not active\n");
217 rte_spinlock_lock(&intr_lock
);
219 /* check if the interrupt source for the fd is existent */
220 TAILQ_FOREACH(src
, &intr_sources
, next
)
221 if (src
->intr_handle
.fd
== intr_handle
->fd
)
224 /* No interrupt source registered for the fd */
228 /* only usable if the source is active */
229 } else if (src
->active
== 0) {
235 /* walk through the callbacks and mark all that match. */
236 for (cb
= TAILQ_FIRST(&src
->callbacks
); cb
!= NULL
; cb
= next
) {
237 next
= TAILQ_NEXT(cb
, next
);
238 if (cb
->cb_fn
== cb_fn
&& (cb_arg
== (void *)-1 ||
239 cb
->cb_arg
== cb_arg
)) {
240 cb
->pending_delete
= 1;
247 rte_spinlock_unlock(&intr_lock
);
/*
 * Immediately unregister callbacks of @intr_handle whose cb_fn matches
 * and whose cb_arg matches (or when cb_arg is the wildcard (void *)-1).
 * Refused while the source is active (callbacks running); in that case
 * use rte_intr_callback_unregister_pending() instead. Also deletes the
 * fd's kevent from the kqueue and drops the source itself once its
 * callback list becomes empty. NOTE(review): returns, frees and some
 * braces are missing from this extraction; comments describe only the
 * visible code.
 */
253 rte_intr_callback_unregister(const struct rte_intr_handle
*intr_handle
,
254 rte_intr_callback_fn cb_fn
, void *cb_arg
)
257 struct rte_intr_source
*src
;
258 struct rte_intr_callback
*cb
, *next
;
260 /* do parameter checking first */
261 if (intr_handle
== NULL
|| intr_handle
->fd
< 0) {
263 "Unregistering with invalid input parameter\n");
267 RTE_LOG(ERR
, EAL
, "Kqueue is not active\n");
271 rte_spinlock_lock(&intr_lock
);
273 /* check if the interrupt source for the fd is existent */
274 TAILQ_FOREACH(src
, &intr_sources
, next
)
275 if (src
->intr_handle
.fd
== intr_handle
->fd
)
278 /* No interrupt source registered for the fd */
282 /* interrupt source has some active callbacks right now. */
283 } else if (src
->active
!= 0) {
292 /* remove it from the kqueue */
293 memset(&ke
, 0, sizeof(ke
));
294 ke
.flags
= EV_DELETE
; /* mark for deletion from the queue */
296 if (intr_source_to_kevent(intr_handle
, &ke
) < 0) {
297 RTE_LOG(ERR
, EAL
, "Cannot convert to kevent\n");
303 * remove intr file descriptor from wait list.
305 if (kevent(kq
, &ke
, 1, NULL
, 0, NULL
) < 0) {
306 RTE_LOG(ERR
, EAL
, "Error removing fd %d kevent, %s\n",
307 src
->intr_handle
.fd
, strerror(errno
));
308 /* removing non-existent event is an expected condition
309 * in some circumstances (e.g. oneshot events).
313 /* walk through the callbacks and remove all that match. */
314 for (cb
= TAILQ_FIRST(&src
->callbacks
); cb
!= NULL
; cb
= next
) {
315 next
= TAILQ_NEXT(cb
, next
);
316 if (cb
->cb_fn
== cb_fn
&& (cb_arg
== (void *)-1 ||
317 cb
->cb_arg
== cb_arg
)) {
318 TAILQ_REMOVE(&src
->callbacks
, cb
, next
);
324 /* all callbacks for that source are removed. */
325 if (TAILQ_EMPTY(&src
->callbacks
)) {
326 TAILQ_REMOVE(&intr_sources
, src
, next
);
331 rte_spinlock_unlock(&intr_lock
);
/*
 * Enable the interrupt for @intr_handle. VDEV handles need no fds and are
 * accepted up front; otherwise both fd and uio_cfg_fd must be valid. The
 * ALARM and DEV_EVENT handle types are explicitly "not used at this
 * moment"; unknown handle types are rejected with a log message.
 * NOTE(review): return statements are missing from this extraction.
 */
337 rte_intr_enable(const struct rte_intr_handle
*intr_handle
)
339 if (intr_handle
&& intr_handle
->type
== RTE_INTR_HANDLE_VDEV
)
342 if (!intr_handle
|| intr_handle
->fd
< 0 || intr_handle
->uio_cfg_fd
< 0)
345 switch (intr_handle
->type
) {
346 /* not used at this moment */
347 case RTE_INTR_HANDLE_ALARM
:
349 /* not used at this moment */
350 case RTE_INTR_HANDLE_DEV_EVENT
:
352 /* unknown handle type */
355 "Unknown handle type of fd %d\n",
/*
 * Disable the interrupt for @intr_handle. Mirrors rte_intr_enable():
 * VDEV handles are accepted without fds; otherwise fd and uio_cfg_fd
 * must be valid; ALARM/DEV_EVENT are unused; unknown types are rejected
 * with a log message. NOTE(review): return statements are missing from
 * this extraction.
 */
364 rte_intr_disable(const struct rte_intr_handle
*intr_handle
)
366 if (intr_handle
&& intr_handle
->type
== RTE_INTR_HANDLE_VDEV
)
369 if (!intr_handle
|| intr_handle
->fd
< 0 || intr_handle
->uio_cfg_fd
< 0)
372 switch (intr_handle
->type
) {
373 /* not used at this moment */
374 case RTE_INTR_HANDLE_ALARM
:
376 /* not used at this moment */
377 case RTE_INTR_HANDLE_DEV_EVENT
:
379 /* unknown handle type */
382 "Unknown handle type of fd %d\n",
/*
 * Dispatch a batch of @nfds kevents returned by kevent(): for each event,
 * look up the interrupt source by fd under intr_lock, drain the fd where
 * the handle type requires a read, then invoke every registered callback.
 * intr_lock is dropped around each callback invocation (a copy of the
 * callback is taken first) so callbacks may themselves (un)register.
 * Afterwards, callbacks flagged pending_delete are removed: their kevent
 * is deleted from the kqueue, the ucb_fn notification is invoked, and the
 * source itself is dropped once its callback list is empty.
 * NOTE(review): numerous statements (declarations of n/bytes_read/ke,
 * continue/break statements, braces) are missing from this extraction;
 * comments describe only the visible code.
 */
391 eal_intr_process_interrupts(struct kevent
*events
, int nfds
)
393 struct rte_intr_callback active_cb
;
394 union rte_intr_read_buffer buf
;
395 struct rte_intr_callback
*cb
, *next
;
396 struct rte_intr_source
*src
;
401 for (n
= 0; n
< nfds
; n
++) {
402 int event_fd
= events
[n
].ident
;
404 rte_spinlock_lock(&intr_lock
);
405 TAILQ_FOREACH(src
, &intr_sources
, next
)
406 if (src
->intr_handle
.fd
== event_fd
)
409 rte_spinlock_unlock(&intr_lock
);
413 /* mark this interrupt source as active and release the lock. */
415 rte_spinlock_unlock(&intr_lock
);
417 /* set the length to be read for different handle type */
418 switch (src
->intr_handle
.type
) {
419 case RTE_INTR_HANDLE_ALARM
:
423 case RTE_INTR_HANDLE_VDEV
:
424 case RTE_INTR_HANDLE_EXT
:
428 case RTE_INTR_HANDLE_DEV_EVENT
:
437 if (bytes_read
> 0) {
439 * read out to clear the ready-to-be-read flag
442 bytes_read
= read(event_fd
, &buf
, bytes_read
);
443 if (bytes_read
< 0) {
444 if (errno
== EINTR
|| errno
== EWOULDBLOCK
)
447 RTE_LOG(ERR
, EAL
, "Error reading from file "
448 "descriptor %d: %s\n",
451 } else if (bytes_read
== 0)
452 RTE_LOG(ERR
, EAL
, "Read nothing from file "
453 "descriptor %d\n", event_fd
);
458 /* grab a lock, again to call callbacks and update status. */
459 rte_spinlock_lock(&intr_lock
);
462 /* Finally, call all callbacks. */
463 TAILQ_FOREACH(cb
, &src
->callbacks
, next
) {
465 /* make a copy and unlock. */
467 rte_spinlock_unlock(&intr_lock
);
469 /* call the actual callback */
470 active_cb
.cb_fn(active_cb
.cb_arg
);
472 /* get the lock back. */
473 rte_spinlock_lock(&intr_lock
);
477 /* we are done with that interrupt source, release it. */
480 /* check if any callbacks are supposed to be removed */
481 for (cb
= TAILQ_FIRST(&src
->callbacks
); cb
!= NULL
; cb
= next
) {
482 next
= TAILQ_NEXT(cb
, next
);
483 if (cb
->pending_delete
) {
484 /* remove it from the kqueue */
485 memset(&ke
, 0, sizeof(ke
));
486 /* mark for deletion from the queue */
487 ke
.flags
= EV_DELETE
;
489 if (intr_source_to_kevent(&src
->intr_handle
, &ke
) < 0) {
490 RTE_LOG(ERR
, EAL
, "Cannot convert to kevent\n");
491 rte_spinlock_unlock(&intr_lock
);
496 * remove intr file descriptor from wait list.
498 if (kevent(kq
, &ke
, 1, NULL
, 0, NULL
) < 0) {
499 RTE_LOG(ERR
, EAL
, "Error removing fd %d kevent, "
500 "%s\n", src
->intr_handle
.fd
,
502 /* removing non-existent event is an expected
503 * condition in some circumstances
504 * (e.g. oneshot events).
508 TAILQ_REMOVE(&src
->callbacks
, cb
, next
);
510 cb
->ucb_fn(&src
->intr_handle
, cb
->cb_arg
);
515 /* all callbacks for that source are removed. */
516 if (TAILQ_EMPTY(&src
->callbacks
)) {
517 TAILQ_REMOVE(&intr_sources
, src
, next
);
521 rte_spinlock_unlock(&intr_lock
);
/*
 * Body of the interrupt handling thread: loops forever, blocking in
 * kevent() on the global kqueue, and hands each batch of ready events to
 * eal_intr_process_interrupts(). A NULL timeout means the kevent timeout
 * branch below should never be taken. NOTE(review): loop construct and
 * error-branch statements are missing from this extraction.
 */
526 eal_intr_thread_main(void *arg __rte_unused
)
528 struct kevent events
[MAX_INTR_EVENTS
];
531 /* host thread, never break out */
533 /* do not change anything, just wait */
534 nfds
= kevent(kq
, NULL
, 0, events
, MAX_INTR_EVENTS
, NULL
);
541 "kevent returns with fail\n");
544 /* kevent timeout, will never happen here */
548 /* kevent has at least one fd ready to read */
549 eal_intr_process_interrupts(events
, nfds
);
/*
 * Initialize the EAL interrupt subsystem: set up the global source list,
 * create the kqueue (error logged on failure), and spawn the
 * "eal-intr-thread" control thread running eal_intr_thread_main().
 * NOTE(review): kqueue() call, return statements and the ret declaration
 * are missing from this extraction.
 */
557 rte_eal_intr_init(void)
561 /* init the global interrupt source head */
562 TAILQ_INIT(&intr_sources
);
566 RTE_LOG(ERR
, EAL
, "Cannot create kqueue instance\n");
570 /* create the host thread to wait/handle the interrupt */
571 ret
= rte_ctrl_thread_create(&intr_thread
, "eal-intr-thread", NULL
,
572 eal_intr_thread_main
, NULL
);
576 "Failed to create thread for interrupt handling\n");
/*
 * Rx-queue interrupt vector control: not supported on FreeBSD. Stub that
 * only marks its parameters unused; presumably returns an error upstream
 * (return statement not visible in this extraction -- verify).
 */
583 rte_intr_rx_ctl(struct rte_intr_handle
*intr_handle
,
584 int epfd
, int op
, unsigned int vec
, void *data
)
586 RTE_SET_USED(intr_handle
);
/*
 * Event-fd enable: not supported on FreeBSD; stub marks parameters
 * unused (return statement not visible in this extraction).
 */
596 rte_intr_efd_enable(struct rte_intr_handle
*intr_handle
, uint32_t nb_efd
)
598 RTE_SET_USED(intr_handle
);
599 RTE_SET_USED(nb_efd
);
/* Event-fd disable: no-op stub on FreeBSD; parameter marked unused. */
605 rte_intr_efd_disable(struct rte_intr_handle
*intr_handle
)
607 RTE_SET_USED(intr_handle
);
/*
 * Datapath-interrupt query: stub on FreeBSD; parameter marked unused
 * (return value not visible in this extraction).
 */
611 rte_intr_dp_is_en(struct rte_intr_handle
*intr_handle
)
613 RTE_SET_USED(intr_handle
);
/*
 * Query whether non-datapath interrupts are allowed: stub on FreeBSD;
 * parameter marked unused (return value not visible in this extraction).
 */
618 rte_intr_allow_others(struct rte_intr_handle
*intr_handle
)
620 RTE_SET_USED(intr_handle
);
/*
 * Query multiple-interrupt capability: stub on FreeBSD; parameter marked
 * unused (return value not visible in this extraction).
 */
625 rte_intr_cap_multiple(struct rte_intr_handle
*intr_handle
)
627 RTE_SET_USED(intr_handle
);
/*
 * epoll emulation is not provided on FreeBSD: stub marks parameters
 * unused (return statement not visible in this extraction).
 */
632 rte_epoll_wait(int epfd
, struct rte_epoll_event
*events
,
633 int maxevents
, int timeout
)
636 RTE_SET_USED(events
);
637 RTE_SET_USED(maxevents
);
638 RTE_SET_USED(timeout
);
/*
 * epoll control is not provided on FreeBSD: stub (body not visible in
 * this extraction).
 */
644 rte_epoll_ctl(int epfd
, int op
, int fd
, struct rte_epoll_event
*event
)
/*
 * Per-thread epoll fd accessor: not supported on FreeBSD (body not
 * visible in this extraction).
 */
655 rte_intr_tls_epfd(void)
/* Free epoll fds associated with a handle: no-op stub on FreeBSD. */
661 rte_intr_free_epoll_fd(struct rte_intr_handle
*intr_handle
)
663 RTE_SET_USED(intr_handle
);